1 //===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass performs loop invariant code motion, attempting to remove as much 10 // code from the body of a loop as possible. It does this by either hoisting 11 // code into the preheader block, or by sinking code to the exit blocks if it is 12 // safe. This pass also promotes must-aliased memory locations in the loop to 13 // live in registers, thus hoisting and sinking "invariant" loads and stores. 14 // 15 // This pass uses alias analysis for two purposes: 16 // 17 // 1. Moving loop invariant loads and calls out of loops. If we can determine 18 // that a load or call inside of a loop never aliases anything stored to, 19 // we can hoist it or sink it like any other instruction. 20 // 2. Scalar Promotion of Memory - If there is a store instruction inside of 21 // the loop, we try to move the store to happen AFTER the loop instead of 22 // inside of the loop. This can only happen if a few conditions are true: 23 // A. The pointer stored through is loop invariant 24 // B. There are no stores or loads in the loop which _may_ alias the 25 // pointer. There are no calls in the loop which mod/ref the pointer. 26 // If these conditions are true, we can promote the loads and stores in the 27 // loop of the pointer to use a temporary alloca'd variable. We then use 28 // the SSAUpdater to construct the appropriate SSA form for the value. 29 // 30 //===----------------------------------------------------------------------===// 31 32 #include "llvm/Transforms/Scalar/LICM.h" 33 #include "llvm/ADT/SetOperations.h" 34 #include "llvm/ADT/Statistic.h" 35 #include "llvm/Analysis/AliasAnalysis.h" 36 #include "llvm/Analysis/AliasSetTracker.h" 37 #include "llvm/Analysis/BasicAliasAnalysis.h" 38 #include "llvm/Analysis/BlockFrequencyInfo.h" 39 #include "llvm/Analysis/CaptureTracking.h" 40 #include "llvm/Analysis/ConstantFolding.h" 41 #include "llvm/Analysis/GlobalsModRef.h" 42 #include "llvm/Analysis/GuardUtils.h" 43 #include "llvm/Analysis/LazyBlockFrequencyInfo.h" 44 #include "llvm/Analysis/Loads.h" 45 #include "llvm/Analysis/LoopInfo.h" 46 #include "llvm/Analysis/LoopIterator.h" 47 #include "llvm/Analysis/LoopPass.h" 48 #include "llvm/Analysis/MemoryBuiltins.h" 49 #include "llvm/Analysis/MemorySSA.h" 50 #include "llvm/Analysis/MemorySSAUpdater.h" 51 #include "llvm/Analysis/MustExecute.h" 52 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 53 #include "llvm/Analysis/ScalarEvolution.h" 54 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" 55 #include "llvm/Analysis/TargetLibraryInfo.h" 56 #include "llvm/Analysis/ValueTracking.h" 57 #include "llvm/IR/CFG.h" 58 #include "llvm/IR/Constants.h" 59 #include "llvm/IR/DataLayout.h" 60 #include "llvm/IR/DebugInfoMetadata.h" 61 #include "llvm/IR/DerivedTypes.h" 62 #include "llvm/IR/Dominators.h" 63 #include "llvm/IR/Instructions.h" 64 #include "llvm/IR/IntrinsicInst.h" 65 #include "llvm/IR/LLVMContext.h" 66 #include "llvm/IR/Metadata.h" 67 #include "llvm/IR/PatternMatch.h" 68 #include "llvm/IR/PredIteratorCache.h" 69 #include "llvm/InitializePasses.h" 70 #include "llvm/Support/CommandLine.h" 71 #include "llvm/Support/Debug.h" 72 #include "llvm/Support/raw_ostream.h" 73 #include "llvm/Transforms/Scalar.h" 74 #include "llvm/Transforms/Scalar/LoopPassManager.h" 75 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 76 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 77 #include "llvm/Transforms/Utils/Local.h" 78 #include "llvm/Transforms/Utils/LoopUtils.h" 79 #include "llvm/Transforms/Utils/SSAUpdater.h" 80 #include <algorithm> 81 #include <utility> 82 using namespace llvm; 83 84 #define DEBUG_TYPE "licm" 85 86 STATISTIC(NumCreatedBlocks, "Number of blocks created"); 87 STATISTIC(NumClonedBranches, "Number of branches cloned"); 88 STATISTIC(NumSunk, "Number of instructions sunk out of loop"); 89 STATISTIC(NumHoisted, "Number of instructions hoisted out of loop"); 90 STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk"); 91 STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk"); 92 STATISTIC(NumPromoted, "Number of memory locations promoted to registers"); 93 94 /// Memory promotion is enabled by default. 95 static cl::opt<bool> 96 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), 97 cl::desc("Disable memory promotion in LICM pass")); 98 99 static cl::opt<bool> ControlFlowHoisting( 100 "licm-control-flow-hoisting", cl::Hidden, cl::init(false), 101 cl::desc("Enable control flow (and PHI) hoisting in LICM")); 102 103 static cl::opt<unsigned> HoistSinkColdnessThreshold( 104 "licm-coldness-threshold", cl::Hidden, cl::init(4), 105 cl::desc("Relative coldness Threshold of hoisting/sinking destination " 106 "block for LICM to be considered beneficial")); 107 108 static cl::opt<uint32_t> MaxNumUsesTraversed( 109 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8), 110 cl::desc("Max num uses visited for identifying load " 111 "invariance in loop using invariant start (default = 8)")); 112 113 // Default value of zero implies we use the regular alias set tracker mechanism 114 // instead of the cross product using AA to identify aliasing of the memory 115 // location we are interested in. 116 static cl::opt<int> 117 LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0), 118 cl::desc("How many instruction to cross product using AA")); 119 120 // Experimental option to allow imprecision in LICM in pathological cases, in 121 // exchange for faster compile. This is to be removed if MemorySSA starts to 122 // address the same issue. This flag applies only when LICM uses MemorySSA 123 // instead on AliasSetTracker. LICM calls MemorySSAWalker's 124 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect 125 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess, 126 // which may not be precise, since optimizeUses is capped. The result is 127 // correct, but we may not get as "far up" as possible to get which access is 128 // clobbering the one queried. 129 cl::opt<unsigned> llvm::SetLicmMssaOptCap( 130 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden, 131 cl::desc("Enable imprecision in LICM in pathological cases, in exchange " 132 "for faster compile. Caps the MemorySSA clobbering calls.")); 133 134 // Experimentally, memory promotion carries less importance than sinking and 135 // hoisting. Limit when we do promotion when using MemorySSA, in order to save 136 // compile time. 137 cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap( 138 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden, 139 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no " 140 "effect. When MSSA in LICM is enabled, then this is the maximum " 141 "number of accesses allowed to be present in a loop in order to " 142 "enable memory promotion.")); 143 144 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI); 145 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop, 146 const LoopSafetyInfo *SafetyInfo, 147 TargetTransformInfo *TTI, bool &FreeInLoop); 148 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, 149 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, 150 MemorySSAUpdater *MSSAU, ScalarEvolution *SE, 151 OptimizationRemarkEmitter *ORE); 152 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, 153 BlockFrequencyInfo *BFI, const Loop *CurLoop, 154 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU, 155 OptimizationRemarkEmitter *ORE); 156 static bool isSafeToExecuteUnconditionally(Instruction &Inst, 157 const DominatorTree *DT, 158 const Loop *CurLoop, 159 const LoopSafetyInfo *SafetyInfo, 160 OptimizationRemarkEmitter *ORE, 161 const Instruction *CtxI = nullptr); 162 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc, 163 AliasSetTracker *CurAST, Loop *CurLoop, 164 AAResults *AA); 165 static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU, 166 Loop *CurLoop, 167 SinkAndHoistLICMFlags &Flags); 168 static Instruction *cloneInstructionInExitBlock( 169 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, 170 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU); 171 172 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, 173 AliasSetTracker *AST, MemorySSAUpdater *MSSAU); 174 175 static void moveInstructionBefore(Instruction &I, Instruction &Dest, 176 ICFLoopSafetyInfo &SafetyInfo, 177 MemorySSAUpdater *MSSAU, ScalarEvolution *SE); 178 179 namespace { 180 struct LoopInvariantCodeMotion { 181 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT, 182 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, 183 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA, 184 OptimizationRemarkEmitter *ORE); 185 186 LoopInvariantCodeMotion(unsigned LicmMssaOptCap, 187 unsigned LicmMssaNoAccForPromotionCap) 188 : LicmMssaOptCap(LicmMssaOptCap), 189 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {} 190 191 private: 192 unsigned LicmMssaOptCap; 193 unsigned LicmMssaNoAccForPromotionCap; 194 195 std::unique_ptr<AliasSetTracker> 196 collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA); 197 std::unique_ptr<AliasSetTracker> 198 collectAliasInfoForLoopWithMSSA(Loop *L, AAResults *AA, 199 MemorySSAUpdater *MSSAU); 200 }; 201 202 struct LegacyLICMPass : public LoopPass { 203 static char ID; // Pass identification, replacement for typeid 204 LegacyLICMPass( 205 unsigned LicmMssaOptCap = SetLicmMssaOptCap, 206 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap) 207 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) { 208 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry()); 209 } 210 211 bool runOnLoop(Loop *L, LPPassManager &LPM) override { 212 if (skipLoop(L)) 213 return false; 214 215 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>(); 216 MemorySSA *MSSA = EnableMSSALoopDependency 217 ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA()) 218 : nullptr; 219 bool hasProfileData = L->getHeader()->getParent()->hasProfileData(); 220 BlockFrequencyInfo *BFI = 221 hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() 222 : nullptr; 223 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis 224 // pass. Function analyses need to be preserved across loop transformations 225 // but ORE cannot be preserved (see comment before the pass definition). 226 OptimizationRemarkEmitter ORE(L->getHeader()->getParent()); 227 return LICM.runOnLoop( 228 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(), 229 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), 230 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI, 231 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI( 232 *L->getHeader()->getParent()), 233 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI( 234 *L->getHeader()->getParent()), 235 SE ? &SE->getSE() : nullptr, MSSA, &ORE); 236 } 237 238 /// This transformation requires natural loop information & requires that 239 /// loop preheaders be inserted into the CFG... 240 /// 241 void getAnalysisUsage(AnalysisUsage &AU) const override { 242 AU.addPreserved<DominatorTreeWrapperPass>(); 243 AU.addPreserved<LoopInfoWrapperPass>(); 244 AU.addRequired<TargetLibraryInfoWrapperPass>(); 245 if (EnableMSSALoopDependency) { 246 AU.addRequired<MemorySSAWrapperPass>(); 247 AU.addPreserved<MemorySSAWrapperPass>(); 248 } 249 AU.addRequired<TargetTransformInfoWrapperPass>(); 250 getLoopAnalysisUsage(AU); 251 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU); 252 AU.addPreserved<LazyBlockFrequencyInfoPass>(); 253 AU.addPreserved<LazyBranchProbabilityInfoPass>(); 254 } 255 256 private: 257 LoopInvariantCodeMotion LICM; 258 }; 259 } // namespace 260 261 PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM, 262 LoopStandardAnalysisResults &AR, LPMUpdater &) { 263 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis 264 // pass. Function analyses need to be preserved across loop transformations 265 // but ORE cannot be preserved (see comment before the pass definition). 266 OptimizationRemarkEmitter ORE(L.getHeader()->getParent()); 267 268 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap); 269 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI, 270 &AR.SE, AR.MSSA, &ORE)) 271 return PreservedAnalyses::all(); 272 273 auto PA = getLoopPassPreservedAnalyses(); 274 275 PA.preserve<DominatorTreeAnalysis>(); 276 PA.preserve<LoopAnalysis>(); 277 if (AR.MSSA) 278 PA.preserve<MemorySSAAnalysis>(); 279 280 return PA; 281 } 282 283 char LegacyLICMPass::ID = 0; 284 INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion", 285 false, false) 286 INITIALIZE_PASS_DEPENDENCY(LoopPass) 287 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 288 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 289 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 290 INITIALIZE_PASS_DEPENDENCY(LazyBFIPass) 291 INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false, 292 false) 293 294 Pass *llvm::createLICMPass() { return new LegacyLICMPass(); } 295 Pass *llvm::createLICMPass(unsigned LicmMssaOptCap, 296 unsigned LicmMssaNoAccForPromotionCap) { 297 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap); 298 } 299 300 /// Hoist expressions out of the specified loop. Note, alias info for inner 301 /// loop is not preserved so it is not a good idea to run LICM multiple 302 /// times on one loop. 303 bool LoopInvariantCodeMotion::runOnLoop( 304 Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT, 305 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI, 306 ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) { 307 bool Changed = false; 308 309 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form."); 310 311 // If this loop has metadata indicating that LICM is not to be performed then 312 // just exit. 313 if (hasDisableLICMTransformsHint(L)) { 314 return false; 315 } 316 317 std::unique_ptr<AliasSetTracker> CurAST; 318 std::unique_ptr<MemorySSAUpdater> MSSAU; 319 bool NoOfMemAccTooLarge = false; 320 unsigned LicmMssaOptCounter = 0; 321 322 if (!MSSA) { 323 LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n"); 324 CurAST = collectAliasInfoForLoop(L, LI, AA); 325 } else { 326 LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n"); 327 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 328 329 unsigned AccessCapCount = 0; 330 for (auto *BB : L->getBlocks()) { 331 if (auto *Accesses = MSSA->getBlockAccesses(BB)) { 332 for (const auto &MA : *Accesses) { 333 (void)MA; 334 AccessCapCount++; 335 if (AccessCapCount > LicmMssaNoAccForPromotionCap) { 336 NoOfMemAccTooLarge = true; 337 break; 338 } 339 } 340 } 341 if (NoOfMemAccTooLarge) 342 break; 343 } 344 } 345 346 // Get the preheader block to move instructions into... 347 BasicBlock *Preheader = L->getLoopPreheader(); 348 349 // Compute loop safety information. 350 ICFLoopSafetyInfo SafetyInfo; 351 SafetyInfo.computeLoopSafetyInfo(L); 352 353 // We want to visit all of the instructions in this loop... that are not parts 354 // of our subloops (they have already had their invariants hoisted out of 355 // their loop, into this loop, so there is no need to process the BODIES of 356 // the subloops). 357 // 358 // Traverse the body of the loop in depth first order on the dominator tree so 359 // that we are guaranteed to see definitions before we see uses. This allows 360 // us to sink instructions in one pass, without iteration. After sinking 361 // instructions, we perform another pass to hoist them out of the loop. 362 SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter, 363 LicmMssaOptCap, LicmMssaNoAccForPromotionCap, 364 /*IsSink=*/true}; 365 if (L->hasDedicatedExits()) 366 Changed |= 367 sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L, 368 CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE); 369 Flags.IsSink = false; 370 if (Preheader) 371 Changed |= 372 hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L, 373 CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE); 374 375 // Now that all loop invariants have been removed from the loop, promote any 376 // memory references to scalars that we can. 377 // Don't sink stores from loops without dedicated block exits. Exits 378 // containing indirect branches are not transformed by loop simplify, 379 // make sure we catch that. An additional load may be generated in the 380 // preheader for SSA updater, so also avoid sinking when no preheader 381 // is available. 382 if (!DisablePromotion && Preheader && L->hasDedicatedExits() && 383 !NoOfMemAccTooLarge) { 384 // Figure out the loop exits and their insertion points 385 SmallVector<BasicBlock *, 8> ExitBlocks; 386 L->getUniqueExitBlocks(ExitBlocks); 387 388 // We can't insert into a catchswitch. 389 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) { 390 return isa<CatchSwitchInst>(Exit->getTerminator()); 391 }); 392 393 if (!HasCatchSwitch) { 394 SmallVector<Instruction *, 8> InsertPts; 395 SmallVector<MemoryAccess *, 8> MSSAInsertPts; 396 InsertPts.reserve(ExitBlocks.size()); 397 if (MSSAU) 398 MSSAInsertPts.reserve(ExitBlocks.size()); 399 for (BasicBlock *ExitBlock : ExitBlocks) { 400 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt()); 401 if (MSSAU) 402 MSSAInsertPts.push_back(nullptr); 403 } 404 405 PredIteratorCache PIC; 406 407 bool Promoted = false; 408 409 // Build an AST using MSSA. 410 if (!CurAST.get()) 411 CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get()); 412 413 // Loop over all of the alias sets in the tracker object. 414 for (AliasSet &AS : *CurAST) { 415 // We can promote this alias set if it has a store, if it is a "Must" 416 // alias set, if the pointer is loop invariant, and if we are not 417 // eliminating any volatile loads or stores. 418 if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() || 419 !L->isLoopInvariant(AS.begin()->getValue())) 420 continue; 421 422 assert( 423 !AS.empty() && 424 "Must alias set should have at least one pointer element in it!"); 425 426 SmallSetVector<Value *, 8> PointerMustAliases; 427 for (const auto &ASI : AS) 428 PointerMustAliases.insert(ASI.getValue()); 429 430 Promoted |= promoteLoopAccessesToScalars( 431 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI, 432 DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE); 433 } 434 435 // Once we have promoted values across the loop body we have to 436 // recursively reform LCSSA as any nested loop may now have values defined 437 // within the loop used in the outer loop. 438 // FIXME: This is really heavy handed. It would be a bit better to use an 439 // SSAUpdater strategy during promotion that was LCSSA aware and reformed 440 // it as it went. 441 if (Promoted) 442 formLCSSARecursively(*L, *DT, LI, SE); 443 444 Changed |= Promoted; 445 } 446 } 447 448 // Check that neither this loop nor its parent have had LCSSA broken. LICM is 449 // specifically moving instructions across the loop boundary and so it is 450 // especially in need of sanity checking here. 451 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!"); 452 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) && 453 "Parent loop not left in LCSSA form after LICM!"); 454 455 if (MSSAU.get() && VerifyMemorySSA) 456 MSSAU->getMemorySSA()->verifyMemorySSA(); 457 458 if (Changed && SE) 459 SE->forgetLoopDispositions(L); 460 return Changed; 461 } 462 463 /// Walk the specified region of the CFG (defined by all blocks dominated by 464 /// the specified block, and that are in the current loop) in reverse depth 465 /// first order w.r.t the DominatorTree. This allows us to visit uses before 466 /// definitions, allowing us to sink a loop body in one pass without iteration. 467 /// 468 bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI, 469 DominatorTree *DT, BlockFrequencyInfo *BFI, 470 TargetLibraryInfo *TLI, TargetTransformInfo *TTI, 471 Loop *CurLoop, AliasSetTracker *CurAST, 472 MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo, 473 SinkAndHoistLICMFlags &Flags, 474 OptimizationRemarkEmitter *ORE) { 475 476 // Verify inputs. 477 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && 478 CurLoop != nullptr && SafetyInfo != nullptr && 479 "Unexpected input to sinkRegion."); 480 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) && 481 "Either AliasSetTracker or MemorySSA should be initialized."); 482 483 // We want to visit children before parents. We will enque all the parents 484 // before their children in the worklist and process the worklist in reverse 485 // order. 486 SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop); 487 488 bool Changed = false; 489 for (DomTreeNode *DTN : reverse(Worklist)) { 490 BasicBlock *BB = DTN->getBlock(); 491 // Only need to process the contents of this block if it is not part of a 492 // subloop (which would already have been processed). 493 if (inSubLoop(BB, CurLoop, LI)) 494 continue; 495 496 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) { 497 Instruction &I = *--II; 498 499 // If the instruction is dead, we would try to sink it because it isn't 500 // used in the loop, instead, just delete it. 501 if (isInstructionTriviallyDead(&I, TLI)) { 502 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); 503 salvageKnowledge(&I); 504 salvageDebugInfo(I); 505 ++II; 506 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); 507 Changed = true; 508 continue; 509 } 510 511 // Check to see if we can sink this instruction to the exit blocks 512 // of the loop. We can do this if the all users of the instruction are 513 // outside of the loop. In this case, it doesn't even matter if the 514 // operands of the instruction are loop invariant. 515 // 516 bool FreeInLoop = false; 517 if (!I.mayHaveSideEffects() && 518 isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) && 519 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags, 520 ORE)) { 521 if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) { 522 if (!FreeInLoop) { 523 ++II; 524 salvageDebugInfo(I); 525 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); 526 } 527 Changed = true; 528 } 529 } 530 } 531 } 532 if (MSSAU && VerifyMemorySSA) 533 MSSAU->getMemorySSA()->verifyMemorySSA(); 534 return Changed; 535 } 536 537 namespace { 538 // This is a helper class for hoistRegion to make it able to hoist control flow 539 // in order to be able to hoist phis. The way this works is that we initially 540 // start hoisting to the loop preheader, and when we see a loop invariant branch 541 // we make note of this. When we then come to hoist an instruction that's 542 // conditional on such a branch we duplicate the branch and the relevant control 543 // flow, then hoist the instruction into the block corresponding to its original 544 // block in the duplicated control flow. 545 class ControlFlowHoister { 546 private: 547 // Information about the loop we are hoisting from 548 LoopInfo *LI; 549 DominatorTree *DT; 550 Loop *CurLoop; 551 MemorySSAUpdater *MSSAU; 552 553 // A map of blocks in the loop to the block their instructions will be hoisted 554 // to. 555 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap; 556 557 // The branches that we can hoist, mapped to the block that marks a 558 // convergence point of their control flow. 559 DenseMap<BranchInst *, BasicBlock *> HoistableBranches; 560 561 public: 562 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop, 563 MemorySSAUpdater *MSSAU) 564 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {} 565 566 void registerPossiblyHoistableBranch(BranchInst *BI) { 567 // We can only hoist conditional branches with loop invariant operands. 568 if (!ControlFlowHoisting || !BI->isConditional() || 569 !CurLoop->hasLoopInvariantOperands(BI)) 570 return; 571 572 // The branch destinations need to be in the loop, and we don't gain 573 // anything by duplicating conditional branches with duplicate successors, 574 // as it's essentially the same as an unconditional branch. 575 BasicBlock *TrueDest = BI->getSuccessor(0); 576 BasicBlock *FalseDest = BI->getSuccessor(1); 577 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) || 578 TrueDest == FalseDest) 579 return; 580 581 // We can hoist BI if one branch destination is the successor of the other, 582 // or both have common successor which we check by seeing if the 583 // intersection of their successors is non-empty. 584 // TODO: This could be expanded to allowing branches where both ends 585 // eventually converge to a single block. 586 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc; 587 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest)); 588 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest)); 589 BasicBlock *CommonSucc = nullptr; 590 if (TrueDestSucc.count(FalseDest)) { 591 CommonSucc = FalseDest; 592 } else if (FalseDestSucc.count(TrueDest)) { 593 CommonSucc = TrueDest; 594 } else { 595 set_intersect(TrueDestSucc, FalseDestSucc); 596 // If there's one common successor use that. 597 if (TrueDestSucc.size() == 1) 598 CommonSucc = *TrueDestSucc.begin(); 599 // If there's more than one pick whichever appears first in the block list 600 // (we can't use the value returned by TrueDestSucc.begin() as it's 601 // unpredicatable which element gets returned). 602 else if (!TrueDestSucc.empty()) { 603 Function *F = TrueDest->getParent(); 604 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); }; 605 auto It = std::find_if(F->begin(), F->end(), IsSucc); 606 assert(It != F->end() && "Could not find successor in function"); 607 CommonSucc = &*It; 608 } 609 } 610 // The common successor has to be dominated by the branch, as otherwise 611 // there will be some other path to the successor that will not be 612 // controlled by this branch so any phi we hoist would be controlled by the 613 // wrong condition. This also takes care of avoiding hoisting of loop back 614 // edges. 615 // TODO: In some cases this could be relaxed if the successor is dominated 616 // by another block that's been hoisted and we can guarantee that the 617 // control flow has been replicated exactly. 618 if (CommonSucc && DT->dominates(BI, CommonSucc)) 619 HoistableBranches[BI] = CommonSucc; 620 } 621 622 bool canHoistPHI(PHINode *PN) { 623 // The phi must have loop invariant operands. 624 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN)) 625 return false; 626 // We can hoist phis if the block they are in is the target of hoistable 627 // branches which cover all of the predecessors of the block. 628 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks; 629 BasicBlock *BB = PN->getParent(); 630 for (BasicBlock *PredBB : predecessors(BB)) 631 PredecessorBlocks.insert(PredBB); 632 // If we have less predecessor blocks than predecessors then the phi will 633 // have more than one incoming value for the same block which we can't 634 // handle. 635 // TODO: This could be handled be erasing some of the duplicate incoming 636 // values. 637 if (PredecessorBlocks.size() != pred_size(BB)) 638 return false; 639 for (auto &Pair : HoistableBranches) { 640 if (Pair.second == BB) { 641 // Which blocks are predecessors via this branch depends on if the 642 // branch is triangle-like or diamond-like. 643 if (Pair.first->getSuccessor(0) == BB) { 644 PredecessorBlocks.erase(Pair.first->getParent()); 645 PredecessorBlocks.erase(Pair.first->getSuccessor(1)); 646 } else if (Pair.first->getSuccessor(1) == BB) { 647 PredecessorBlocks.erase(Pair.first->getParent()); 648 PredecessorBlocks.erase(Pair.first->getSuccessor(0)); 649 } else { 650 PredecessorBlocks.erase(Pair.first->getSuccessor(0)); 651 PredecessorBlocks.erase(Pair.first->getSuccessor(1)); 652 } 653 } 654 } 655 // PredecessorBlocks will now be empty if for every predecessor of BB we 656 // found a hoistable branch source. 657 return PredecessorBlocks.empty(); 658 } 659 660 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) { 661 if (!ControlFlowHoisting) 662 return CurLoop->getLoopPreheader(); 663 // If BB has already been hoisted, return that 664 if (HoistDestinationMap.count(BB)) 665 return HoistDestinationMap[BB]; 666 667 // Check if this block is conditional based on a pending branch 668 auto HasBBAsSuccessor = 669 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) { 670 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB || 671 Pair.first->getSuccessor(1) == BB); 672 }; 673 auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(), 674 HasBBAsSuccessor); 675 676 // If not involved in a pending branch, hoist to preheader 677 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader(); 678 if (It == HoistableBranches.end()) { 679 LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName() 680 << " as hoist destination for " << BB->getName() 681 << "\n"); 682 HoistDestinationMap[BB] = InitialPreheader; 683 return InitialPreheader; 684 } 685 BranchInst *BI = It->first; 686 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == 687 HoistableBranches.end() && 688 "BB is expected to be the target of at most one branch"); 689 690 LLVMContext &C = BB->getContext(); 691 BasicBlock *TrueDest = BI->getSuccessor(0); 692 BasicBlock *FalseDest = BI->getSuccessor(1); 693 BasicBlock *CommonSucc = HoistableBranches[BI]; 694 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent()); 695 696 // Create hoisted versions of blocks that currently don't have them 697 auto CreateHoistedBlock = [&](BasicBlock *Orig) { 698 if (HoistDestinationMap.count(Orig)) 699 return HoistDestinationMap[Orig]; 700 BasicBlock *New = 701 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent()); 702 HoistDestinationMap[Orig] = New; 703 DT->addNewBlock(New, HoistTarget); 704 if (CurLoop->getParentLoop()) 705 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI); 706 ++NumCreatedBlocks; 707 LLVM_DEBUG(dbgs() << "LICM created " << New->getName() 708 << " as hoist destination for " << Orig->getName() 709 << "\n"); 710 return New; 711 }; 712 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest); 713 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest); 714 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc); 715 716 // Link up these blocks with branches. 717 if (!HoistCommonSucc->getTerminator()) { 718 // The new common successor we've generated will branch to whatever that 719 // hoist target branched to. 720 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor(); 721 assert(TargetSucc && "Expected hoist target to have a single successor"); 722 HoistCommonSucc->moveBefore(TargetSucc); 723 BranchInst::Create(TargetSucc, HoistCommonSucc); 724 } 725 if (!HoistTrueDest->getTerminator()) { 726 HoistTrueDest->moveBefore(HoistCommonSucc); 727 BranchInst::Create(HoistCommonSucc, HoistTrueDest); 728 } 729 if (!HoistFalseDest->getTerminator()) { 730 HoistFalseDest->moveBefore(HoistCommonSucc); 731 BranchInst::Create(HoistCommonSucc, HoistFalseDest); 732 } 733 734 // If BI is being cloned to what was originally the preheader then 735 // HoistCommonSucc will now be the new preheader. 736 if (HoistTarget == InitialPreheader) { 737 // Phis in the loop header now need to use the new preheader. 738 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc); 739 if (MSSAU) 740 MSSAU->wireOldPredecessorsToNewImmediatePredecessor( 741 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget}); 742 // The new preheader dominates the loop header. 743 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc); 744 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader()); 745 DT->changeImmediateDominator(HeaderNode, PreheaderNode); 746 // The preheader hoist destination is now the new preheader, with the 747 // exception of the hoist destination of this branch. 748 for (auto &Pair : HoistDestinationMap) 749 if (Pair.second == InitialPreheader && Pair.first != BI->getParent()) 750 Pair.second = HoistCommonSucc; 751 } 752 753 // Now finally clone BI. 754 ReplaceInstWithInst( 755 HoistTarget->getTerminator(), 756 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition())); 757 ++NumClonedBranches; 758 759 assert(CurLoop->getLoopPreheader() && 760 "Hoisting blocks should not have destroyed preheader"); 761 return HoistDestinationMap[BB]; 762 } 763 }; 764 } // namespace 765 766 // Hoisting/sinking instruction out of a loop isn't always beneficial. It's only 767 // only worthwhile if the destination block is actually colder than current 768 // block. 769 static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock, 770 OptimizationRemarkEmitter *ORE, 771 BlockFrequencyInfo *BFI) { 772 // Check block frequency only when runtime profile is available 773 // to avoid pathological cases. With static profile, lean towards 774 // hosting because it helps canonicalize the loop for vectorizer. 775 if (!DstBlock->getParent()->hasProfileData()) 776 return true; 777 778 if (!HoistSinkColdnessThreshold || !BFI) 779 return true; 780 781 BasicBlock *SrcBlock = I.getParent(); 782 if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold > 783 BFI->getBlockFreq(SrcBlock).getFrequency()) { 784 ORE->emit([&]() { 785 return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I) 786 << "failed to sink or hoist instruction because containing block " 787 "has lower frequency than destination block"; 788 }); 789 return false; 790 } 791 792 return true; 793 } 794 795 /// Walk the specified region of the CFG (defined by all blocks dominated by 796 /// the specified block, and that are in the current loop) in depth first 797 /// order w.r.t the DominatorTree. This allows us to visit definitions before 798 /// uses, allowing us to hoist a loop body in one pass without iteration. 799 /// 800 bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI, 801 DominatorTree *DT, BlockFrequencyInfo *BFI, 802 TargetLibraryInfo *TLI, Loop *CurLoop, 803 AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU, 804 ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo, 805 SinkAndHoistLICMFlags &Flags, 806 OptimizationRemarkEmitter *ORE) { 807 // Verify inputs. 808 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && 809 CurLoop != nullptr && SafetyInfo != nullptr && 810 "Unexpected input to hoistRegion."); 811 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) && 812 "Either AliasSetTracker or MemorySSA should be initialized."); 813 814 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU); 815 816 // Keep track of instructions that have been hoisted, as they may need to be 817 // re-hoisted if they end up not dominating all of their uses. 818 SmallVector<Instruction *, 16> HoistedInstructions; 819 820 // For PHI hoisting to work we need to hoist blocks before their successors. 821 // We can do this by iterating through the blocks in the loop in reverse 822 // post-order. 823 LoopBlocksRPO Worklist(CurLoop); 824 Worklist.perform(LI); 825 bool Changed = false; 826 for (BasicBlock *BB : Worklist) { 827 // Only need to process the contents of this block if it is not part of a 828 // subloop (which would already have been processed). 829 if (inSubLoop(BB, CurLoop, LI)) 830 continue; 831 832 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) { 833 Instruction &I = *II++; 834 // Try constant folding this instruction. If all the operands are 835 // constants, it is technically hoistable, but it would be better to 836 // just fold it. 837 if (Constant *C = ConstantFoldInstruction( 838 &I, I.getModule()->getDataLayout(), TLI)) { 839 LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C 840 << '\n'); 841 if (CurAST) 842 CurAST->copyValue(&I, C); 843 // FIXME MSSA: Such replacements may make accesses unoptimized (D51960). 844 I.replaceAllUsesWith(C); 845 if (isInstructionTriviallyDead(&I, TLI)) 846 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); 847 Changed = true; 848 continue; 849 } 850 851 // Try hoisting the instruction out to the preheader. We can only do 852 // this if all of the operands of the instruction are loop invariant and 853 // if it is safe to hoist the instruction. We also check block frequency 854 // to make sure instruction only gets hoisted into colder blocks. 855 // TODO: It may be safe to hoist if we are hoisting to a conditional block 856 // and we have accurately duplicated the control flow from the loop header 857 // to that block. 858 if (CurLoop->hasLoopInvariantOperands(&I) && 859 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags, 860 ORE) && 861 worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) && 862 isSafeToExecuteUnconditionally( 863 I, DT, CurLoop, SafetyInfo, ORE, 864 CurLoop->getLoopPreheader()->getTerminator())) { 865 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, 866 MSSAU, SE, ORE); 867 HoistedInstructions.push_back(&I); 868 Changed = true; 869 continue; 870 } 871 872 // Attempt to remove floating point division out of the loop by 873 // converting it to a reciprocal multiplication. 874 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() && 875 CurLoop->isLoopInvariant(I.getOperand(1))) { 876 auto Divisor = I.getOperand(1); 877 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0); 878 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor); 879 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags()); 880 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent()); 881 ReciprocalDivisor->insertBefore(&I); 882 883 auto Product = 884 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor); 885 Product->setFastMathFlags(I.getFastMathFlags()); 886 SafetyInfo->insertInstructionTo(Product, I.getParent()); 887 Product->insertAfter(&I); 888 I.replaceAllUsesWith(Product); 889 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); 890 891 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), 892 SafetyInfo, MSSAU, SE, ORE); 893 HoistedInstructions.push_back(ReciprocalDivisor); 894 Changed = true; 895 continue; 896 } 897 898 auto IsInvariantStart = [&](Instruction &I) { 899 using namespace PatternMatch; 900 return I.use_empty() && 901 match(&I, m_Intrinsic<Intrinsic::invariant_start>()); 902 }; 903 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) { 904 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) && 905 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop); 906 }; 907 if ((IsInvariantStart(I) || isGuard(&I)) && 908 CurLoop->hasLoopInvariantOperands(&I) && 909 MustExecuteWithoutWritesBefore(I)) { 910 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, 911 MSSAU, SE, ORE); 912 HoistedInstructions.push_back(&I); 913 Changed = true; 914 continue; 915 } 916 917 if (PHINode *PN = dyn_cast<PHINode>(&I)) { 918 if (CFH.canHoistPHI(PN)) { 919 // Redirect incoming blocks first to ensure that we create hoisted 920 // versions of those blocks before we hoist the phi. 921 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i) 922 PN->setIncomingBlock( 923 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i))); 924 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, 925 MSSAU, SE, ORE); 926 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected"); 927 Changed = true; 928 continue; 929 } 930 } 931 932 // Remember possibly hoistable branches so we can actually hoist them 933 // later if needed. 934 if (BranchInst *BI = dyn_cast<BranchInst>(&I)) 935 CFH.registerPossiblyHoistableBranch(BI); 936 } 937 } 938 939 // If we hoisted instructions to a conditional block they may not dominate 940 // their uses that weren't hoisted (such as phis where some operands are not 941 // loop invariant). If so make them unconditional by moving them to their 942 // immediate dominator. We iterate through the instructions in reverse order 943 // which ensures that when we rehoist an instruction we rehoist its operands, 944 // and also keep track of where in the block we are rehoisting to to make sure 945 // that we rehoist instructions before the instructions that use them. 946 Instruction *HoistPoint = nullptr; 947 if (ControlFlowHoisting) { 948 for (Instruction *I : reverse(HoistedInstructions)) { 949 if (!llvm::all_of(I->uses(), 950 [&](Use &U) { return DT->dominates(I, U); })) { 951 BasicBlock *Dominator = 952 DT->getNode(I->getParent())->getIDom()->getBlock(); 953 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) { 954 if (HoistPoint) 955 assert(DT->dominates(Dominator, HoistPoint->getParent()) && 956 "New hoist point expected to dominate old hoist point"); 957 HoistPoint = Dominator->getTerminator(); 958 } 959 LLVM_DEBUG(dbgs() << "LICM rehoisting to " 960 << HoistPoint->getParent()->getName() 961 << ": " << *I << "\n"); 962 moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE); 963 HoistPoint = I; 964 Changed = true; 965 } 966 } 967 } 968 if (MSSAU && VerifyMemorySSA) 969 MSSAU->getMemorySSA()->verifyMemorySSA(); 970 971 // Now that we've finished hoisting make sure that LI and DT are still 972 // valid. 973 #ifdef EXPENSIVE_CHECKS 974 if (Changed) { 975 assert(DT->verify(DominatorTree::VerificationLevel::Fast) && 976 "Dominator tree verification failed"); 977 LI->verify(*DT); 978 } 979 #endif 980 981 return Changed; 982 } 983 984 // Return true if LI is invariant within scope of the loop. LI is invariant if 985 // CurLoop is dominated by an invariant.start representing the same memory 986 // location and size as the memory location LI loads from, and also the 987 // invariant.start has no uses. 988 static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, 989 Loop *CurLoop) { 990 Value *Addr = LI->getOperand(0); 991 const DataLayout &DL = LI->getModule()->getDataLayout(); 992 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType()); 993 994 // It is not currently possible for clang to generate an invariant.start 995 // intrinsic with scalable vector types because we don't support thread local 996 // sizeless types and we don't permit sizeless types in structs or classes. 997 // Furthermore, even if support is added for this in future the intrinsic 998 // itself is defined to have a size of -1 for variable sized objects. This 999 // makes it impossible to verify if the intrinsic envelops our region of 1000 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8> 1001 // types would have a -1 parameter, but the former is clearly double the size 1002 // of the latter. 1003 if (LocSizeInBits.isScalable()) 1004 return false; 1005 1006 // if the type is i8 addrspace(x)*, we know this is the type of 1007 // llvm.invariant.start operand 1008 auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()), 1009 LI->getPointerAddressSpace()); 1010 unsigned BitcastsVisited = 0; 1011 // Look through bitcasts until we reach the i8* type (this is invariant.start 1012 // operand type). 1013 while (Addr->getType() != PtrInt8Ty) { 1014 auto *BC = dyn_cast<BitCastInst>(Addr); 1015 // Avoid traversing high number of bitcast uses. 1016 if (++BitcastsVisited > MaxNumUsesTraversed || !BC) 1017 return false; 1018 Addr = BC->getOperand(0); 1019 } 1020 1021 unsigned UsesVisited = 0; 1022 // Traverse all uses of the load operand value, to see if invariant.start is 1023 // one of the uses, and whether it dominates the load instruction. 1024 for (auto *U : Addr->users()) { 1025 // Avoid traversing for Load operand with high number of users. 1026 if (++UsesVisited > MaxNumUsesTraversed) 1027 return false; 1028 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 1029 // If there are escaping uses of invariant.start instruction, the load maybe 1030 // non-invariant. 1031 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start || 1032 !II->use_empty()) 1033 continue; 1034 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0)); 1035 // The intrinsic supports having a -1 argument for variable sized objects 1036 // so we should check for that here. 1037 if (InvariantSize->isNegative()) 1038 continue; 1039 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8; 1040 // Confirm the invariant.start location size contains the load operand size 1041 // in bits. Also, the invariant.start should dominate the load, and we 1042 // should not hoist the load out of a loop that contains this dominating 1043 // invariant.start. 1044 if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits && 1045 DT->properlyDominates(II->getParent(), CurLoop->getHeader())) 1046 return true; 1047 } 1048 1049 return false; 1050 } 1051 1052 namespace { 1053 /// Return true if-and-only-if we know how to (mechanically) both hoist and 1054 /// sink a given instruction out of a loop. Does not address legality 1055 /// concerns such as aliasing or speculation safety. 1056 bool isHoistableAndSinkableInst(Instruction &I) { 1057 // Only these instructions are hoistable/sinkable. 1058 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) || 1059 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) || 1060 isa<BinaryOperator>(I) || isa<SelectInst>(I) || 1061 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) || 1062 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 1063 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) || 1064 isa<InsertValueInst>(I) || isa<FreezeInst>(I)); 1065 } 1066 /// Return true if all of the alias sets within this AST are known not to 1067 /// contain a Mod, or if MSSA knows thare are no MemoryDefs in the loop. 1068 bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU, 1069 const Loop *L) { 1070 if (CurAST) { 1071 for (AliasSet &AS : *CurAST) { 1072 if (!AS.isForwardingAliasSet() && AS.isMod()) { 1073 return false; 1074 } 1075 } 1076 return true; 1077 } else { /*MSSAU*/ 1078 for (auto *BB : L->getBlocks()) 1079 if (MSSAU->getMemorySSA()->getBlockDefs(BB)) 1080 return false; 1081 return true; 1082 } 1083 } 1084 1085 /// Return true if I is the only Instruction with a MemoryAccess in L. 1086 bool isOnlyMemoryAccess(const Instruction *I, const Loop *L, 1087 const MemorySSAUpdater *MSSAU) { 1088 for (auto *BB : L->getBlocks()) 1089 if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) { 1090 int NotAPhi = 0; 1091 for (const auto &Acc : *Accs) { 1092 if (isa<MemoryPhi>(&Acc)) 1093 continue; 1094 const auto *MUD = cast<MemoryUseOrDef>(&Acc); 1095 if (MUD->getMemoryInst() != I || NotAPhi++ == 1) 1096 return false; 1097 } 1098 } 1099 return true; 1100 } 1101 } 1102 1103 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, 1104 Loop *CurLoop, AliasSetTracker *CurAST, 1105 MemorySSAUpdater *MSSAU, 1106 bool TargetExecutesOncePerLoop, 1107 SinkAndHoistLICMFlags *Flags, 1108 OptimizationRemarkEmitter *ORE) { 1109 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) && 1110 "Either AliasSetTracker or MemorySSA should be initialized."); 1111 1112 // If we don't understand the instruction, bail early. 1113 if (!isHoistableAndSinkableInst(I)) 1114 return false; 1115 1116 MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr; 1117 if (MSSA) 1118 assert(Flags != nullptr && "Flags cannot be null."); 1119 1120 // Loads have extra constraints we have to verify before we can hoist them. 1121 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) { 1122 if (!LI->isUnordered()) 1123 return false; // Don't sink/hoist volatile or ordered atomic loads! 1124 1125 // Loads from constant memory are always safe to move, even if they end up 1126 // in the same alias set as something that ends up being modified. 1127 if (AA->pointsToConstantMemory(LI->getOperand(0))) 1128 return true; 1129 if (LI->hasMetadata(LLVMContext::MD_invariant_load)) 1130 return true; 1131 1132 if (LI->isAtomic() && !TargetExecutesOncePerLoop) 1133 return false; // Don't risk duplicating unordered loads 1134 1135 // This checks for an invariant.start dominating the load. 1136 if (isLoadInvariantInLoop(LI, DT, CurLoop)) 1137 return true; 1138 1139 bool Invalidated; 1140 if (CurAST) 1141 Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST, 1142 CurLoop, AA); 1143 else 1144 Invalidated = pointerInvalidatedByLoopWithMSSA( 1145 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags); 1146 // Check loop-invariant address because this may also be a sinkable load 1147 // whose address is not necessarily loop-invariant. 1148 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand())) 1149 ORE->emit([&]() { 1150 return OptimizationRemarkMissed( 1151 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI) 1152 << "failed to move load with loop-invariant address " 1153 "because the loop may invalidate its value"; 1154 }); 1155 1156 return !Invalidated; 1157 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) { 1158 // Don't sink or hoist dbg info; it's legal, but not useful. 1159 if (isa<DbgInfoIntrinsic>(I)) 1160 return false; 1161 1162 // Don't sink calls which can throw. 1163 if (CI->mayThrow()) 1164 return false; 1165 1166 using namespace PatternMatch; 1167 if (match(CI, m_Intrinsic<Intrinsic::assume>())) 1168 // Assumes don't actually alias anything or throw 1169 return true; 1170 1171 if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>())) 1172 // Widenable conditions don't actually alias anything or throw 1173 return true; 1174 1175 // Handle simple cases by querying alias analysis. 1176 FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI); 1177 if (Behavior == FMRB_DoesNotAccessMemory) 1178 return true; 1179 if (AAResults::onlyReadsMemory(Behavior)) { 1180 // A readonly argmemonly function only reads from memory pointed to by 1181 // it's arguments with arbitrary offsets. If we can prove there are no 1182 // writes to this memory in the loop, we can hoist or sink. 1183 if (AAResults::onlyAccessesArgPointees(Behavior)) { 1184 // TODO: expand to writeable arguments 1185 for (Value *Op : CI->arg_operands()) 1186 if (Op->getType()->isPointerTy()) { 1187 bool Invalidated; 1188 if (CurAST) 1189 Invalidated = pointerInvalidatedByLoop( 1190 MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()), 1191 CurAST, CurLoop, AA); 1192 else 1193 Invalidated = pointerInvalidatedByLoopWithMSSA( 1194 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, 1195 *Flags); 1196 if (Invalidated) 1197 return false; 1198 } 1199 return true; 1200 } 1201 1202 // If this call only reads from memory and there are no writes to memory 1203 // in the loop, we can hoist or sink the call as appropriate. 1204 if (isReadOnly(CurAST, MSSAU, CurLoop)) 1205 return true; 1206 } 1207 1208 // FIXME: This should use mod/ref information to see if we can hoist or 1209 // sink the call. 1210 1211 return false; 1212 } else if (auto *FI = dyn_cast<FenceInst>(&I)) { 1213 // Fences alias (most) everything to provide ordering. For the moment, 1214 // just give up if there are any other memory operations in the loop. 1215 if (CurAST) { 1216 auto Begin = CurAST->begin(); 1217 assert(Begin != CurAST->end() && "must contain FI"); 1218 if (std::next(Begin) != CurAST->end()) 1219 // constant memory for instance, TODO: handle better 1220 return false; 1221 auto *UniqueI = Begin->getUniqueInstruction(); 1222 if (!UniqueI) 1223 // other memory op, give up 1224 return false; 1225 (void)FI; // suppress unused variable warning 1226 assert(UniqueI == FI && "AS must contain FI"); 1227 return true; 1228 } else // MSSAU 1229 return isOnlyMemoryAccess(FI, CurLoop, MSSAU); 1230 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 1231 if (!SI->isUnordered()) 1232 return false; // Don't sink/hoist volatile or ordered atomic store! 1233 1234 // We can only hoist a store that we can prove writes a value which is not 1235 // read or overwritten within the loop. For those cases, we fallback to 1236 // load store promotion instead. TODO: We can extend this to cases where 1237 // there is exactly one write to the location and that write dominates an 1238 // arbitrary number of reads in the loop. 1239 if (CurAST) { 1240 auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI)); 1241 1242 if (AS.isRef() || !AS.isMustAlias()) 1243 // Quick exit test, handled by the full path below as well. 1244 return false; 1245 auto *UniqueI = AS.getUniqueInstruction(); 1246 if (!UniqueI) 1247 // other memory op, give up 1248 return false; 1249 assert(UniqueI == SI && "AS must contain SI"); 1250 return true; 1251 } else { // MSSAU 1252 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU)) 1253 return true; 1254 // If there are more accesses than the Promotion cap, give up, we're not 1255 // walking a list that long. 1256 if (Flags->NoOfMemAccTooLarge) 1257 return false; 1258 // Check store only if there's still "quota" to check clobber. 1259 if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap) 1260 return false; 1261 // If there are interfering Uses (i.e. their defining access is in the 1262 // loop), or ordered loads (stored as Defs!), don't move this store. 1263 // Could do better here, but this is conservatively correct. 1264 // TODO: Cache set of Uses on the first walk in runOnLoop, update when 1265 // moving accesses. Can also extend to dominating uses. 1266 auto *SIMD = MSSA->getMemoryAccess(SI); 1267 for (auto *BB : CurLoop->getBlocks()) 1268 if (auto *Accesses = MSSA->getBlockAccesses(BB)) { 1269 for (const auto &MA : *Accesses) 1270 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) { 1271 auto *MD = MU->getDefiningAccess(); 1272 if (!MSSA->isLiveOnEntryDef(MD) && 1273 CurLoop->contains(MD->getBlock())) 1274 return false; 1275 // Disable hoisting past potentially interfering loads. Optimized 1276 // Uses may point to an access outside the loop, as getClobbering 1277 // checks the previous iteration when walking the backedge. 1278 // FIXME: More precise: no Uses that alias SI. 1279 if (!Flags->IsSink && !MSSA->dominates(SIMD, MU)) 1280 return false; 1281 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) { 1282 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) { 1283 (void)LI; // Silence warning. 1284 assert(!LI->isUnordered() && "Expected unordered load"); 1285 return false; 1286 } 1287 // Any call, while it may not be clobbering SI, it may be a use. 1288 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) { 1289 // Check if the call may read from the memory locattion written 1290 // to by SI. Check CI's attributes and arguments; the number of 1291 // such checks performed is limited above by NoOfMemAccTooLarge. 1292 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI)); 1293 if (isModOrRefSet(MRI)) 1294 return false; 1295 } 1296 } 1297 } 1298 1299 auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI); 1300 Flags->LicmMssaOptCounter++; 1301 // If there are no clobbering Defs in the loop, store is safe to hoist. 1302 return MSSA->isLiveOnEntryDef(Source) || 1303 !CurLoop->contains(Source->getBlock()); 1304 } 1305 } 1306 1307 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing"); 1308 1309 // We've established mechanical ability and aliasing, it's up to the caller 1310 // to check fault safety 1311 return true; 1312 } 1313 1314 /// Returns true if a PHINode is a trivially replaceable with an 1315 /// Instruction. 1316 /// This is true when all incoming values are that instruction. 1317 /// This pattern occurs most often with LCSSA PHI nodes. 1318 /// 1319 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) { 1320 for (const Value *IncValue : PN.incoming_values()) 1321 if (IncValue != &I) 1322 return false; 1323 1324 return true; 1325 } 1326 1327 /// Return true if the instruction is free in the loop. 1328 static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop, 1329 const TargetTransformInfo *TTI) { 1330 1331 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) { 1332 if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) != 1333 TargetTransformInfo::TCC_Free) 1334 return false; 1335 // For a GEP, we cannot simply use getUserCost because currently it 1336 // optimistically assume that a GEP will fold into addressing mode 1337 // regardless of its users. 1338 const BasicBlock *BB = GEP->getParent(); 1339 for (const User *U : GEP->users()) { 1340 const Instruction *UI = cast<Instruction>(U); 1341 if (CurLoop->contains(UI) && 1342 (BB != UI->getParent() || 1343 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI)))) 1344 return false; 1345 } 1346 return true; 1347 } else 1348 return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) == 1349 TargetTransformInfo::TCC_Free; 1350 } 1351 1352 /// Return true if the only users of this instruction are outside of 1353 /// the loop. If this is true, we can sink the instruction to the exit 1354 /// blocks of the loop. 1355 /// 1356 /// We also return true if the instruction could be folded away in lowering. 1357 /// (e.g., a GEP can be folded into a load as an addressing mode in the loop). 1358 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop, 1359 const LoopSafetyInfo *SafetyInfo, 1360 TargetTransformInfo *TTI, bool &FreeInLoop) { 1361 const auto &BlockColors = SafetyInfo->getBlockColors(); 1362 bool IsFree = isFreeInLoop(I, CurLoop, TTI); 1363 for (const User *U : I.users()) { 1364 const Instruction *UI = cast<Instruction>(U); 1365 if (const PHINode *PN = dyn_cast<PHINode>(UI)) { 1366 const BasicBlock *BB = PN->getParent(); 1367 // We cannot sink uses in catchswitches. 1368 if (isa<CatchSwitchInst>(BB->getTerminator())) 1369 return false; 1370 1371 // We need to sink a callsite to a unique funclet. Avoid sinking if the 1372 // phi use is too muddled. 1373 if (isa<CallInst>(I)) 1374 if (!BlockColors.empty() && 1375 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1) 1376 return false; 1377 } 1378 1379 if (CurLoop->contains(UI)) { 1380 if (IsFree) { 1381 FreeInLoop = true; 1382 continue; 1383 } 1384 return false; 1385 } 1386 } 1387 return true; 1388 } 1389 1390 static Instruction *cloneInstructionInExitBlock( 1391 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, 1392 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) { 1393 Instruction *New; 1394 if (auto *CI = dyn_cast<CallInst>(&I)) { 1395 const auto &BlockColors = SafetyInfo->getBlockColors(); 1396 1397 // Sinking call-sites need to be handled differently from other 1398 // instructions. The cloned call-site needs a funclet bundle operand 1399 // appropriate for its location in the CFG. 1400 SmallVector<OperandBundleDef, 1> OpBundles; 1401 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles(); 1402 BundleIdx != BundleEnd; ++BundleIdx) { 1403 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx); 1404 if (Bundle.getTagID() == LLVMContext::OB_funclet) 1405 continue; 1406 1407 OpBundles.emplace_back(Bundle); 1408 } 1409 1410 if (!BlockColors.empty()) { 1411 const ColorVector &CV = BlockColors.find(&ExitBlock)->second; 1412 assert(CV.size() == 1 && "non-unique color for exit block!"); 1413 BasicBlock *BBColor = CV.front(); 1414 Instruction *EHPad = BBColor->getFirstNonPHI(); 1415 if (EHPad->isEHPad()) 1416 OpBundles.emplace_back("funclet", EHPad); 1417 } 1418 1419 New = CallInst::Create(CI, OpBundles); 1420 } else { 1421 New = I.clone(); 1422 } 1423 1424 ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New); 1425 if (!I.getName().empty()) 1426 New->setName(I.getName() + ".le"); 1427 1428 if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) { 1429 // Create a new MemoryAccess and let MemorySSA set its defining access. 1430 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( 1431 New, nullptr, New->getParent(), MemorySSA::Beginning); 1432 if (NewMemAcc) { 1433 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc)) 1434 MSSAU->insertDef(MemDef, /*RenameUses=*/true); 1435 else { 1436 auto *MemUse = cast<MemoryUse>(NewMemAcc); 1437 MSSAU->insertUse(MemUse, /*RenameUses=*/true); 1438 } 1439 } 1440 } 1441 1442 // Build LCSSA PHI nodes for any in-loop operands. Note that this is 1443 // particularly cheap because we can rip off the PHI node that we're 1444 // replacing for the number and blocks of the predecessors. 1445 // OPT: If this shows up in a profile, we can instead finish sinking all 1446 // invariant instructions, and then walk their operands to re-establish 1447 // LCSSA. That will eliminate creating PHI nodes just to nuke them when 1448 // sinking bottom-up. 1449 for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE; 1450 ++OI) 1451 if (Instruction *OInst = dyn_cast<Instruction>(*OI)) 1452 if (Loop *OLoop = LI->getLoopFor(OInst->getParent())) 1453 if (!OLoop->contains(&PN)) { 1454 PHINode *OpPN = 1455 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(), 1456 OInst->getName() + ".lcssa", &ExitBlock.front()); 1457 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) 1458 OpPN->addIncoming(OInst, PN.getIncomingBlock(i)); 1459 *OI = OpPN; 1460 } 1461 return New; 1462 } 1463 1464 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, 1465 AliasSetTracker *AST, MemorySSAUpdater *MSSAU) { 1466 if (AST) 1467 AST->deleteValue(&I); 1468 if (MSSAU) 1469 MSSAU->removeMemoryAccess(&I); 1470 SafetyInfo.removeInstruction(&I); 1471 I.eraseFromParent(); 1472 } 1473 1474 static void moveInstructionBefore(Instruction &I, Instruction &Dest, 1475 ICFLoopSafetyInfo &SafetyInfo, 1476 MemorySSAUpdater *MSSAU, 1477 ScalarEvolution *SE) { 1478 SafetyInfo.removeInstruction(&I); 1479 SafetyInfo.insertInstructionTo(&I, Dest.getParent()); 1480 I.moveBefore(&Dest); 1481 if (MSSAU) 1482 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>( 1483 MSSAU->getMemorySSA()->getMemoryAccess(&I))) 1484 MSSAU->moveToPlace(OldMemAcc, Dest.getParent(), 1485 MemorySSA::BeforeTerminator); 1486 if (SE) 1487 SE->forgetValue(&I); 1488 } 1489 1490 static Instruction *sinkThroughTriviallyReplaceablePHI( 1491 PHINode *TPN, Instruction *I, LoopInfo *LI, 1492 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies, 1493 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, 1494 MemorySSAUpdater *MSSAU) { 1495 assert(isTriviallyReplaceablePHI(*TPN, *I) && 1496 "Expect only trivially replaceable PHI"); 1497 BasicBlock *ExitBlock = TPN->getParent(); 1498 Instruction *New; 1499 auto It = SunkCopies.find(ExitBlock); 1500 if (It != SunkCopies.end()) 1501 New = It->second; 1502 else 1503 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock( 1504 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU); 1505 return New; 1506 } 1507 1508 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) { 1509 BasicBlock *BB = PN->getParent(); 1510 if (!BB->canSplitPredecessors()) 1511 return false; 1512 // It's not impossible to split EHPad blocks, but if BlockColors already exist 1513 // it require updating BlockColors for all offspring blocks accordingly. By 1514 // skipping such corner case, we can make updating BlockColors after splitting 1515 // predecessor fairly simple. 1516 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad()) 1517 return false; 1518 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { 1519 BasicBlock *BBPred = *PI; 1520 if (isa<IndirectBrInst>(BBPred->getTerminator()) || 1521 isa<CallBrInst>(BBPred->getTerminator())) 1522 return false; 1523 } 1524 return true; 1525 } 1526 1527 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, 1528 LoopInfo *LI, const Loop *CurLoop, 1529 LoopSafetyInfo *SafetyInfo, 1530 MemorySSAUpdater *MSSAU) { 1531 #ifndef NDEBUG 1532 SmallVector<BasicBlock *, 32> ExitBlocks; 1533 CurLoop->getUniqueExitBlocks(ExitBlocks); 1534 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), 1535 ExitBlocks.end()); 1536 #endif 1537 BasicBlock *ExitBB = PN->getParent(); 1538 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block."); 1539 1540 // Split predecessors of the loop exit to make instructions in the loop are 1541 // exposed to exit blocks through trivially replaceable PHIs while keeping the 1542 // loop in the canonical form where each predecessor of each exit block should 1543 // be contained within the loop. For example, this will convert the loop below 1544 // from 1545 // 1546 // LB1: 1547 // %v1 = 1548 // br %LE, %LB2 1549 // LB2: 1550 // %v2 = 1551 // br %LE, %LB1 1552 // LE: 1553 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable 1554 // 1555 // to 1556 // 1557 // LB1: 1558 // %v1 = 1559 // br %LE.split, %LB2 1560 // LB2: 1561 // %v2 = 1562 // br %LE.split2, %LB1 1563 // LE.split: 1564 // %p1 = phi [%v1, %LB1] <-- trivially replaceable 1565 // br %LE 1566 // LE.split2: 1567 // %p2 = phi [%v2, %LB2] <-- trivially replaceable 1568 // br %LE 1569 // LE: 1570 // %p = phi [%p1, %LE.split], [%p2, %LE.split2] 1571 // 1572 const auto &BlockColors = SafetyInfo->getBlockColors(); 1573 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB)); 1574 while (!PredBBs.empty()) { 1575 BasicBlock *PredBB = *PredBBs.begin(); 1576 assert(CurLoop->contains(PredBB) && 1577 "Expect all predecessors are in the loop"); 1578 if (PN->getBasicBlockIndex(PredBB) >= 0) { 1579 BasicBlock *NewPred = SplitBlockPredecessors( 1580 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true); 1581 // Since we do not allow splitting EH-block with BlockColors in 1582 // canSplitPredecessors(), we can simply assign predecessor's color to 1583 // the new block. 1584 if (!BlockColors.empty()) 1585 // Grab a reference to the ColorVector to be inserted before getting the 1586 // reference to the vector we are copying because inserting the new 1587 // element in BlockColors might cause the map to be reallocated. 1588 SafetyInfo->copyColors(NewPred, PredBB); 1589 } 1590 PredBBs.remove(PredBB); 1591 } 1592 } 1593 1594 /// When an instruction is found to only be used outside of the loop, this 1595 /// function moves it to the exit blocks and patches up SSA form as needed. 1596 /// This method is guaranteed to remove the original instruction from its 1597 /// position, and may either delete it or move it to outside of the loop. 1598 /// 1599 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, 1600 BlockFrequencyInfo *BFI, const Loop *CurLoop, 1601 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU, 1602 OptimizationRemarkEmitter *ORE) { 1603 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); 1604 ORE->emit([&]() { 1605 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I) 1606 << "sinking " << ore::NV("Inst", &I); 1607 }); 1608 bool Changed = false; 1609 if (isa<LoadInst>(I)) 1610 ++NumMovedLoads; 1611 else if (isa<CallInst>(I)) 1612 ++NumMovedCalls; 1613 ++NumSunk; 1614 1615 // Iterate over users to be ready for actual sinking. Replace users via 1616 // unreachable blocks with undef and make all user PHIs trivially replaceable. 1617 SmallPtrSet<Instruction *, 8> VisitedUsers; 1618 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) { 1619 auto *User = cast<Instruction>(*UI); 1620 Use &U = UI.getUse(); 1621 ++UI; 1622 1623 if (VisitedUsers.count(User) || CurLoop->contains(User)) 1624 continue; 1625 1626 if (!DT->isReachableFromEntry(User->getParent())) { 1627 U = UndefValue::get(I.getType()); 1628 Changed = true; 1629 continue; 1630 } 1631 1632 // The user must be a PHI node. 1633 PHINode *PN = cast<PHINode>(User); 1634 1635 // Surprisingly, instructions can be used outside of loops without any 1636 // exits. This can only happen in PHI nodes if the incoming block is 1637 // unreachable. 1638 BasicBlock *BB = PN->getIncomingBlock(U); 1639 if (!DT->isReachableFromEntry(BB)) { 1640 U = UndefValue::get(I.getType()); 1641 Changed = true; 1642 continue; 1643 } 1644 1645 VisitedUsers.insert(PN); 1646 if (isTriviallyReplaceablePHI(*PN, I)) 1647 continue; 1648 1649 if (!canSplitPredecessors(PN, SafetyInfo)) 1650 return Changed; 1651 1652 // Split predecessors of the PHI so that we can make users trivially 1653 // replaceable. 1654 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU); 1655 1656 // Should rebuild the iterators, as they may be invalidated by 1657 // splitPredecessorsOfLoopExit(). 1658 UI = I.user_begin(); 1659 UE = I.user_end(); 1660 } 1661 1662 if (VisitedUsers.empty()) 1663 return Changed; 1664 1665 #ifndef NDEBUG 1666 SmallVector<BasicBlock *, 32> ExitBlocks; 1667 CurLoop->getUniqueExitBlocks(ExitBlocks); 1668 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), 1669 ExitBlocks.end()); 1670 #endif 1671 1672 // Clones of this instruction. Don't create more than one per exit block! 1673 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies; 1674 1675 // If this instruction is only used outside of the loop, then all users are 1676 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of 1677 // the instruction. 1678 // First check if I is worth sinking for all uses. Sink only when it is worth 1679 // across all uses. 1680 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end()); 1681 SmallVector<PHINode *, 8> ExitPNs; 1682 for (auto *UI : Users) { 1683 auto *User = cast<Instruction>(UI); 1684 1685 if (CurLoop->contains(User)) 1686 continue; 1687 1688 PHINode *PN = cast<PHINode>(User); 1689 assert(ExitBlockSet.count(PN->getParent()) && 1690 "The LCSSA PHI is not in an exit block!"); 1691 if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) { 1692 return Changed; 1693 } 1694 1695 ExitPNs.push_back(PN); 1696 } 1697 1698 for (auto *PN : ExitPNs) { 1699 1700 // The PHI must be trivially replaceable. 1701 Instruction *New = sinkThroughTriviallyReplaceablePHI( 1702 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU); 1703 PN->replaceAllUsesWith(New); 1704 eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr); 1705 Changed = true; 1706 } 1707 return Changed; 1708 } 1709 1710 /// When an instruction is found to only use loop invariant operands that 1711 /// is safe to hoist, this instruction is called to do the dirty work. 1712 /// 1713 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, 1714 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, 1715 MemorySSAUpdater *MSSAU, ScalarEvolution *SE, 1716 OptimizationRemarkEmitter *ORE) { 1717 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I 1718 << "\n"); 1719 ORE->emit([&]() { 1720 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting " 1721 << ore::NV("Inst", &I); 1722 }); 1723 1724 // Metadata can be dependent on conditions we are hoisting above. 1725 // Conservatively strip all metadata on the instruction unless we were 1726 // guaranteed to execute I if we entered the loop, in which case the metadata 1727 // is valid in the loop preheader. 1728 if (I.hasMetadataOtherThanDebugLoc() && 1729 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning 1730 // time in isGuaranteedToExecute if we don't actually have anything to 1731 // drop. It is a compile time optimization, not required for correctness. 1732 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) 1733 I.dropUnknownNonDebugMetadata(); 1734 1735 if (isa<PHINode>(I)) 1736 // Move the new node to the end of the phi list in the destination block. 1737 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE); 1738 else 1739 // Move the new node to the destination block, before its terminator. 1740 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE); 1741 1742 I.updateLocationAfterHoist(); 1743 1744 if (isa<LoadInst>(I)) 1745 ++NumMovedLoads; 1746 else if (isa<CallInst>(I)) 1747 ++NumMovedCalls; 1748 ++NumHoisted; 1749 } 1750 1751 /// Only sink or hoist an instruction if it is not a trapping instruction, 1752 /// or if the instruction is known not to trap when moved to the preheader. 1753 /// or if it is a trapping instruction and is guaranteed to execute. 1754 static bool isSafeToExecuteUnconditionally(Instruction &Inst, 1755 const DominatorTree *DT, 1756 const Loop *CurLoop, 1757 const LoopSafetyInfo *SafetyInfo, 1758 OptimizationRemarkEmitter *ORE, 1759 const Instruction *CtxI) { 1760 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT)) 1761 return true; 1762 1763 bool GuaranteedToExecute = 1764 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop); 1765 1766 if (!GuaranteedToExecute) { 1767 auto *LI = dyn_cast<LoadInst>(&Inst); 1768 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand())) 1769 ORE->emit([&]() { 1770 return OptimizationRemarkMissed( 1771 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI) 1772 << "failed to hoist load with loop-invariant address " 1773 "because load is conditionally executed"; 1774 }); 1775 } 1776 1777 return GuaranteedToExecute; 1778 } 1779 1780 namespace { 1781 class LoopPromoter : public LoadAndStorePromoter { 1782 Value *SomePtr; // Designated pointer to store to. 1783 const SmallSetVector<Value *, 8> &PointerMustAliases; 1784 SmallVectorImpl<BasicBlock *> &LoopExitBlocks; 1785 SmallVectorImpl<Instruction *> &LoopInsertPts; 1786 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts; 1787 PredIteratorCache &PredCache; 1788 AliasSetTracker *AST; 1789 MemorySSAUpdater *MSSAU; 1790 LoopInfo &LI; 1791 DebugLoc DL; 1792 int Alignment; 1793 bool UnorderedAtomic; 1794 AAMDNodes AATags; 1795 ICFLoopSafetyInfo &SafetyInfo; 1796 1797 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const { 1798 if (Instruction *I = dyn_cast<Instruction>(V)) 1799 if (Loop *L = LI.getLoopFor(I->getParent())) 1800 if (!L->contains(BB)) { 1801 // We need to create an LCSSA PHI node for the incoming value and 1802 // store that. 1803 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB), 1804 I->getName() + ".lcssa", &BB->front()); 1805 for (BasicBlock *Pred : PredCache.get(BB)) 1806 PN->addIncoming(I, Pred); 1807 return PN; 1808 } 1809 return V; 1810 } 1811 1812 public: 1813 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S, 1814 const SmallSetVector<Value *, 8> &PMA, 1815 SmallVectorImpl<BasicBlock *> &LEB, 1816 SmallVectorImpl<Instruction *> &LIP, 1817 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC, 1818 AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li, 1819 DebugLoc dl, int alignment, bool UnorderedAtomic, 1820 const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo) 1821 : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA), 1822 LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), 1823 PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)), 1824 Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags), 1825 SafetyInfo(SafetyInfo) {} 1826 1827 bool isInstInList(Instruction *I, 1828 const SmallVectorImpl<Instruction *> &) const override { 1829 Value *Ptr; 1830 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 1831 Ptr = LI->getOperand(0); 1832 else 1833 Ptr = cast<StoreInst>(I)->getPointerOperand(); 1834 return PointerMustAliases.count(Ptr); 1835 } 1836 1837 void doExtraRewritesBeforeFinalDeletion() override { 1838 // Insert stores after in the loop exit blocks. Each exit block gets a 1839 // store of the live-out values that feed them. Since we've already told 1840 // the SSA updater about the defs in the loop and the preheader 1841 // definition, it is all set and we can start using it. 1842 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) { 1843 BasicBlock *ExitBlock = LoopExitBlocks[i]; 1844 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); 1845 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock); 1846 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock); 1847 Instruction *InsertPos = LoopInsertPts[i]; 1848 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos); 1849 if (UnorderedAtomic) 1850 NewSI->setOrdering(AtomicOrdering::Unordered); 1851 NewSI->setAlignment(Align(Alignment)); 1852 NewSI->setDebugLoc(DL); 1853 if (AATags) 1854 NewSI->setAAMetadata(AATags); 1855 1856 if (MSSAU) { 1857 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i]; 1858 MemoryAccess *NewMemAcc; 1859 if (!MSSAInsertPoint) { 1860 NewMemAcc = MSSAU->createMemoryAccessInBB( 1861 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning); 1862 } else { 1863 NewMemAcc = 1864 MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint); 1865 } 1866 MSSAInsertPts[i] = NewMemAcc; 1867 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); 1868 // FIXME: true for safety, false may still be correct. 1869 } 1870 } 1871 } 1872 1873 void replaceLoadWithValue(LoadInst *LI, Value *V) const override { 1874 // Update alias analysis. 1875 if (AST) 1876 AST->copyValue(LI, V); 1877 } 1878 void instructionDeleted(Instruction *I) const override { 1879 SafetyInfo.removeInstruction(I); 1880 if (AST) 1881 AST->deleteValue(I); 1882 if (MSSAU) 1883 MSSAU->removeMemoryAccess(I); 1884 } 1885 }; 1886 1887 1888 /// Return true iff we can prove that a caller of this function can not inspect 1889 /// the contents of the provided object in a well defined program. 1890 bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) { 1891 if (isa<AllocaInst>(Object)) 1892 // Since the alloca goes out of scope, we know the caller can't retain a 1893 // reference to it and be well defined. Thus, we don't need to check for 1894 // capture. 1895 return true; 1896 1897 // For all other objects we need to know that the caller can't possibly 1898 // have gotten a reference to the object. There are two components of 1899 // that: 1900 // 1) Object can't be escaped by this function. This is what 1901 // PointerMayBeCaptured checks. 1902 // 2) Object can't have been captured at definition site. For this, we 1903 // need to know the return value is noalias. At the moment, we use a 1904 // weaker condition and handle only AllocLikeFunctions (which are 1905 // known to be noalias). TODO 1906 return isAllocLikeFn(Object, TLI) && 1907 !PointerMayBeCaptured(Object, true, true); 1908 } 1909 1910 } // namespace 1911 1912 /// Try to promote memory values to scalars by sinking stores out of the 1913 /// loop and moving loads to before the loop. We do this by looping over 1914 /// the stores in the loop, looking for stores to Must pointers which are 1915 /// loop invariant. 1916 /// 1917 bool llvm::promoteLoopAccessesToScalars( 1918 const SmallSetVector<Value *, 8> &PointerMustAliases, 1919 SmallVectorImpl<BasicBlock *> &ExitBlocks, 1920 SmallVectorImpl<Instruction *> &InsertPts, 1921 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC, 1922 LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, 1923 Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU, 1924 ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) { 1925 // Verify inputs. 1926 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr && 1927 SafetyInfo != nullptr && 1928 "Unexpected Input to promoteLoopAccessesToScalars"); 1929 1930 Value *SomePtr = *PointerMustAliases.begin(); 1931 BasicBlock *Preheader = CurLoop->getLoopPreheader(); 1932 1933 // It is not safe to promote a load/store from the loop if the load/store is 1934 // conditional. For example, turning: 1935 // 1936 // for () { if (c) *P += 1; } 1937 // 1938 // into: 1939 // 1940 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp; 1941 // 1942 // is not safe, because *P may only be valid to access if 'c' is true. 1943 // 1944 // The safety property divides into two parts: 1945 // p1) The memory may not be dereferenceable on entry to the loop. In this 1946 // case, we can't insert the required load in the preheader. 1947 // p2) The memory model does not allow us to insert a store along any dynamic 1948 // path which did not originally have one. 1949 // 1950 // If at least one store is guaranteed to execute, both properties are 1951 // satisfied, and promotion is legal. 1952 // 1953 // This, however, is not a necessary condition. Even if no store/load is 1954 // guaranteed to execute, we can still establish these properties. 1955 // We can establish (p1) by proving that hoisting the load into the preheader 1956 // is safe (i.e. proving dereferenceability on all paths through the loop). We 1957 // can use any access within the alias set to prove dereferenceability, 1958 // since they're all must alias. 1959 // 1960 // There are two ways establish (p2): 1961 // a) Prove the location is thread-local. In this case the memory model 1962 // requirement does not apply, and stores are safe to insert. 1963 // b) Prove a store dominates every exit block. In this case, if an exit 1964 // blocks is reached, the original dynamic path would have taken us through 1965 // the store, so inserting a store into the exit block is safe. Note that this 1966 // is different from the store being guaranteed to execute. For instance, 1967 // if an exception is thrown on the first iteration of the loop, the original 1968 // store is never executed, but the exit blocks are not executed either. 1969 1970 bool DereferenceableInPH = false; 1971 bool SafeToInsertStore = false; 1972 1973 SmallVector<Instruction *, 64> LoopUses; 1974 1975 // We start with an alignment of one and try to find instructions that allow 1976 // us to prove better alignment. 1977 Align Alignment; 1978 // Keep track of which types of access we see 1979 bool SawUnorderedAtomic = false; 1980 bool SawNotAtomic = false; 1981 AAMDNodes AATags; 1982 1983 const DataLayout &MDL = Preheader->getModule()->getDataLayout(); 1984 1985 bool IsKnownThreadLocalObject = false; 1986 if (SafetyInfo->anyBlockMayThrow()) { 1987 // If a loop can throw, we have to insert a store along each unwind edge. 1988 // That said, we can't actually make the unwind edge explicit. Therefore, 1989 // we have to prove that the store is dead along the unwind edge. We do 1990 // this by proving that the caller can't have a reference to the object 1991 // after return and thus can't possibly load from the object. 1992 Value *Object = getUnderlyingObject(SomePtr); 1993 if (!isKnownNonEscaping(Object, TLI)) 1994 return false; 1995 // Subtlety: Alloca's aren't visible to callers, but *are* potentially 1996 // visible to other threads if captured and used during their lifetimes. 1997 IsKnownThreadLocalObject = !isa<AllocaInst>(Object); 1998 } 1999 2000 // Check that all of the pointers in the alias set have the same type. We 2001 // cannot (yet) promote a memory location that is loaded and stored in 2002 // different sizes. While we are at it, collect alignment and AA info. 2003 for (Value *ASIV : PointerMustAliases) { 2004 // Check that all of the pointers in the alias set have the same type. We 2005 // cannot (yet) promote a memory location that is loaded and stored in 2006 // different sizes. 2007 if (SomePtr->getType() != ASIV->getType()) 2008 return false; 2009 2010 for (User *U : ASIV->users()) { 2011 // Ignore instructions that are outside the loop. 2012 Instruction *UI = dyn_cast<Instruction>(U); 2013 if (!UI || !CurLoop->contains(UI)) 2014 continue; 2015 2016 // If there is an non-load/store instruction in the loop, we can't promote 2017 // it. 2018 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) { 2019 if (!Load->isUnordered()) 2020 return false; 2021 2022 SawUnorderedAtomic |= Load->isAtomic(); 2023 SawNotAtomic |= !Load->isAtomic(); 2024 2025 Align InstAlignment = Load->getAlign(); 2026 2027 // Note that proving a load safe to speculate requires proving 2028 // sufficient alignment at the target location. Proving it guaranteed 2029 // to execute does as well. Thus we can increase our guaranteed 2030 // alignment as well. 2031 if (!DereferenceableInPH || (InstAlignment > Alignment)) 2032 if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo, 2033 ORE, Preheader->getTerminator())) { 2034 DereferenceableInPH = true; 2035 Alignment = std::max(Alignment, InstAlignment); 2036 } 2037 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) { 2038 // Stores *of* the pointer are not interesting, only stores *to* the 2039 // pointer. 2040 if (UI->getOperand(1) != ASIV) 2041 continue; 2042 if (!Store->isUnordered()) 2043 return false; 2044 2045 SawUnorderedAtomic |= Store->isAtomic(); 2046 SawNotAtomic |= !Store->isAtomic(); 2047 2048 // If the store is guaranteed to execute, both properties are satisfied. 2049 // We may want to check if a store is guaranteed to execute even if we 2050 // already know that promotion is safe, since it may have higher 2051 // alignment than any other guaranteed stores, in which case we can 2052 // raise the alignment on the promoted store. 2053 Align InstAlignment = Store->getAlign(); 2054 2055 if (!DereferenceableInPH || !SafeToInsertStore || 2056 (InstAlignment > Alignment)) { 2057 if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) { 2058 DereferenceableInPH = true; 2059 SafeToInsertStore = true; 2060 Alignment = std::max(Alignment, InstAlignment); 2061 } 2062 } 2063 2064 // If a store dominates all exit blocks, it is safe to sink. 2065 // As explained above, if an exit block was executed, a dominating 2066 // store must have been executed at least once, so we are not 2067 // introducing stores on paths that did not have them. 2068 // Note that this only looks at explicit exit blocks. If we ever 2069 // start sinking stores into unwind edges (see above), this will break. 2070 if (!SafeToInsertStore) 2071 SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) { 2072 return DT->dominates(Store->getParent(), Exit); 2073 }); 2074 2075 // If the store is not guaranteed to execute, we may still get 2076 // deref info through it. 2077 if (!DereferenceableInPH) { 2078 DereferenceableInPH = isDereferenceableAndAlignedPointer( 2079 Store->getPointerOperand(), Store->getValueOperand()->getType(), 2080 Store->getAlign(), MDL, Preheader->getTerminator(), DT); 2081 } 2082 } else 2083 return false; // Not a load or store. 2084 2085 // Merge the AA tags. 2086 if (LoopUses.empty()) { 2087 // On the first load/store, just take its AA tags. 2088 UI->getAAMetadata(AATags); 2089 } else if (AATags) { 2090 UI->getAAMetadata(AATags, /* Merge = */ true); 2091 } 2092 2093 LoopUses.push_back(UI); 2094 } 2095 } 2096 2097 // If we found both an unordered atomic instruction and a non-atomic memory 2098 // access, bail. We can't blindly promote non-atomic to atomic since we 2099 // might not be able to lower the result. We can't downgrade since that 2100 // would violate memory model. Also, align 0 is an error for atomics. 2101 if (SawUnorderedAtomic && SawNotAtomic) 2102 return false; 2103 2104 // If we're inserting an atomic load in the preheader, we must be able to 2105 // lower it. We're only guaranteed to be able to lower naturally aligned 2106 // atomics. 2107 auto *SomePtrElemType = SomePtr->getType()->getPointerElementType(); 2108 if (SawUnorderedAtomic && 2109 Alignment < MDL.getTypeStoreSize(SomePtrElemType)) 2110 return false; 2111 2112 // If we couldn't prove we can hoist the load, bail. 2113 if (!DereferenceableInPH) 2114 return false; 2115 2116 // We know we can hoist the load, but don't have a guaranteed store. 2117 // Check whether the location is thread-local. If it is, then we can insert 2118 // stores along paths which originally didn't have them without violating the 2119 // memory model. 2120 if (!SafeToInsertStore) { 2121 if (IsKnownThreadLocalObject) 2122 SafeToInsertStore = true; 2123 else { 2124 Value *Object = getUnderlyingObject(SomePtr); 2125 SafeToInsertStore = 2126 (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) && 2127 !PointerMayBeCaptured(Object, true, true); 2128 } 2129 } 2130 2131 // If we've still failed to prove we can sink the store, give up. 2132 if (!SafeToInsertStore) 2133 return false; 2134 2135 // Otherwise, this is safe to promote, lets do it! 2136 LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr 2137 << '\n'); 2138 ORE->emit([&]() { 2139 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar", 2140 LoopUses[0]) 2141 << "Moving accesses to memory location out of the loop"; 2142 }); 2143 ++NumPromoted; 2144 2145 // Look at all the loop uses, and try to merge their locations. 2146 std::vector<const DILocation *> LoopUsesLocs; 2147 for (auto U : LoopUses) 2148 LoopUsesLocs.push_back(U->getDebugLoc().get()); 2149 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs)); 2150 2151 // We use the SSAUpdater interface to insert phi nodes as required. 2152 SmallVector<PHINode *, 16> NewPHIs; 2153 SSAUpdater SSA(&NewPHIs); 2154 LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks, 2155 InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL, 2156 Alignment.value(), SawUnorderedAtomic, AATags, 2157 *SafetyInfo); 2158 2159 // Set up the preheader to have a definition of the value. It is the live-out 2160 // value from the preheader that uses in the loop will use. 2161 LoadInst *PreheaderLoad = new LoadInst( 2162 SomePtr->getType()->getPointerElementType(), SomePtr, 2163 SomePtr->getName() + ".promoted", Preheader->getTerminator()); 2164 if (SawUnorderedAtomic) 2165 PreheaderLoad->setOrdering(AtomicOrdering::Unordered); 2166 PreheaderLoad->setAlignment(Alignment); 2167 PreheaderLoad->setDebugLoc(DebugLoc()); 2168 if (AATags) 2169 PreheaderLoad->setAAMetadata(AATags); 2170 SSA.AddAvailableValue(Preheader, PreheaderLoad); 2171 2172 if (MSSAU) { 2173 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB( 2174 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End); 2175 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess); 2176 MSSAU->insertUse(NewMemUse, /*RenameUses=*/true); 2177 } 2178 2179 if (MSSAU && VerifyMemorySSA) 2180 MSSAU->getMemorySSA()->verifyMemorySSA(); 2181 // Rewrite all the loads in the loop and remember all the definitions from 2182 // stores in the loop. 2183 Promoter.run(LoopUses); 2184 2185 if (MSSAU && VerifyMemorySSA) 2186 MSSAU->getMemorySSA()->verifyMemorySSA(); 2187 // If the SSAUpdater didn't use the load in the preheader, just zap it now. 2188 if (PreheaderLoad->use_empty()) 2189 eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU); 2190 2191 return true; 2192 } 2193 2194 /// Returns an owning pointer to an alias set which incorporates aliasing info 2195 /// from L and all subloops of L. 2196 std::unique_ptr<AliasSetTracker> 2197 LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI, 2198 AAResults *AA) { 2199 auto CurAST = std::make_unique<AliasSetTracker>(*AA); 2200 2201 // Add everything from all the sub loops. 2202 for (Loop *InnerL : L->getSubLoops()) 2203 for (BasicBlock *BB : InnerL->blocks()) 2204 CurAST->add(*BB); 2205 2206 // And merge in this loop (without anything from inner loops). 2207 for (BasicBlock *BB : L->blocks()) 2208 if (LI->getLoopFor(BB) == L) 2209 CurAST->add(*BB); 2210 2211 return CurAST; 2212 } 2213 2214 std::unique_ptr<AliasSetTracker> 2215 LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA( 2216 Loop *L, AAResults *AA, MemorySSAUpdater *MSSAU) { 2217 auto *MSSA = MSSAU->getMemorySSA(); 2218 auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L); 2219 CurAST->addAllInstructionsInLoopUsingMSSA(); 2220 return CurAST; 2221 } 2222 2223 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc, 2224 AliasSetTracker *CurAST, Loop *CurLoop, 2225 AAResults *AA) { 2226 // First check to see if any of the basic blocks in CurLoop invalidate *V. 2227 bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod(); 2228 2229 if (!isInvalidatedAccordingToAST || !LICMN2Theshold) 2230 return isInvalidatedAccordingToAST; 2231 2232 // Check with a diagnostic analysis if we can refine the information above. 2233 // This is to identify the limitations of using the AST. 2234 // The alias set mechanism used by LICM has a major weakness in that it 2235 // combines all things which may alias into a single set *before* asking 2236 // modref questions. As a result, a single readonly call within a loop will 2237 // collapse all loads and stores into a single alias set and report 2238 // invalidation if the loop contains any store. For example, readonly calls 2239 // with deopt states have this form and create a general alias set with all 2240 // loads and stores. In order to get any LICM in loops containing possible 2241 // deopt states we need a more precise invalidation of checking the mod ref 2242 // info of each instruction within the loop and LI. This has a complexity of 2243 // O(N^2), so currently, it is used only as a diagnostic tool since the 2244 // default value of LICMN2Threshold is zero. 2245 2246 // Don't look at nested loops. 2247 if (CurLoop->begin() != CurLoop->end()) 2248 return true; 2249 2250 int N = 0; 2251 for (BasicBlock *BB : CurLoop->getBlocks()) 2252 for (Instruction &I : *BB) { 2253 if (N >= LICMN2Theshold) { 2254 LLVM_DEBUG(dbgs() << "Alasing N2 threshold exhausted for " 2255 << *(MemLoc.Ptr) << "\n"); 2256 return true; 2257 } 2258 N++; 2259 auto Res = AA->getModRefInfo(&I, MemLoc); 2260 if (isModSet(Res)) { 2261 LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for " 2262 << *(MemLoc.Ptr) << "\n"); 2263 return true; 2264 } 2265 } 2266 LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n"); 2267 return false; 2268 } 2269 2270 static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU, 2271 Loop *CurLoop, 2272 SinkAndHoistLICMFlags &Flags) { 2273 // For hoisting, use the walker to determine safety 2274 if (!Flags.IsSink) { 2275 MemoryAccess *Source; 2276 // See declaration of SetLicmMssaOptCap for usage details. 2277 if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap) 2278 Source = MU->getDefiningAccess(); 2279 else { 2280 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU); 2281 Flags.LicmMssaOptCounter++; 2282 } 2283 return !MSSA->isLiveOnEntryDef(Source) && 2284 CurLoop->contains(Source->getBlock()); 2285 } 2286 2287 // For sinking, we'd need to check all Defs below this use. The getClobbering 2288 // call will look on the backedge of the loop, but will check aliasing with 2289 // the instructions on the previous iteration. 2290 // For example: 2291 // for (i ... ) 2292 // load a[i] ( Use (LoE) 2293 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop. 2294 // i++; 2295 // The load sees no clobbering inside the loop, as the backedge alias check 2296 // does phi translation, and will check aliasing against store a[i-1]. 2297 // However sinking the load outside the loop, below the store is incorrect. 2298 2299 // For now, only sink if there are no Defs in the loop, and the existing ones 2300 // precede the use and are in the same block. 2301 // FIXME: Increase precision: Safe to sink if Use post dominates the Def; 2302 // needs PostDominatorTreeAnalysis. 2303 // FIXME: More precise: no Defs that alias this Use. 2304 if (Flags.NoOfMemAccTooLarge) 2305 return true; 2306 for (auto *BB : CurLoop->getBlocks()) 2307 if (auto *Accesses = MSSA->getBlockDefs(BB)) 2308 for (const auto &MA : *Accesses) 2309 if (const auto *MD = dyn_cast<MemoryDef>(&MA)) 2310 if (MU->getBlock() != MD->getBlock() || 2311 !MSSA->locallyDominates(MD, MU)) 2312 return true; 2313 return false; 2314 } 2315 2316 /// Little predicate that returns true if the specified basic block is in 2317 /// a subloop of the current one, not the current one itself. 2318 /// 2319 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) { 2320 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop"); 2321 return LI->getLoopFor(BB) != CurLoop; 2322 } 2323