//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden,
                     cl::desc("Disable memory promotion in LICM pass"));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop,
                            const LoopSafetyInfo *SafetyInfo);
static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  const LoopSafetyInfo *SafetyInfo);
static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
                 const Loop *CurLoop, AliasSetTracker *CurAST,
                 const LoopSafetyInfo *SafetyInfo);
static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
                                     const AAMDNodes &AAInfo,
                                     AliasSetTracker *CurAST);
static Instruction *
CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
                            const LoopInfo *LI,
                            const LoopSafetyInfo *SafetyInfo);
static bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA,
                               DominatorTree *DT, Loop *CurLoop,
                               AliasSetTracker *CurAST,
                               LoopSafetyInfo *SafetyInfo);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
                 TargetLibraryInfo *TLI, ScalarEvolution *SE, bool DeleteAST);

  DenseMap<Loop *, AliasSetTracker *> &getLoopToAliasSetMap() {
    return LoopToAliasSetMap;
  }

private:
  DenseMap<Loop *, AliasSetTracker *> LoopToAliasSetMap;

  AliasSetTracker *collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                           AliasAnalysis *AA);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass() : LoopPass(ID) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    return LICM.runOnLoop(L,
                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
                          SE ? &SE->getSE() : nullptr, false);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  using llvm::Pass::doFinalization;

  bool doFinalization() override {
    assert(LICM.getLoopToAliasSetMap().empty() &&
           "Didn't free loop alias sets");
    return false;
  }

private:
  LoopInvariantCodeMotion LICM;

  /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
  void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                               Loop *L) override;

  /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
  /// set.
  void deleteAnalysisValue(Value *V, Loop *L) override;

  /// Simple Analysis hook. Delete loop L from alias set map.
  void deleteAnalysisLoop(Loop *L) override;
};
}

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM) {
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
  Function *F = L.getHeader()->getParent();

  auto *AA = FAM.getCachedResult<AAManager>(*F);
  auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
  auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
  auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
  assert((AA && LI && DT && TLI && SE) && "Analyses for LICM not available");

  LoopInvariantCodeMotion LICM;

  if (!LICM.runOnLoop(&L, AA, LI, DT, TLI, SE, true))
    return PreservedAnalyses::all();

  // FIXME: There is no setPreservesCFG in the new PM. When that becomes
  // available, it should be used here.
  return getLoopPassPreservedAnalyses();
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
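
// LICM is normally exercised as part of the standard -O2/-O3 pipelines; for
// targeted testing the legacy pass can be run directly with "opt -licm", and
// the new pass manager entry point above is reached through the loop pass
// pipeline (for example "opt -passes='loop(licm)'"; the exact pipeline syntax
// may differ between releases).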

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
/// We should delete the AST for inner loops in the new pass manager to avoid
/// a memory leak.
///
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AliasAnalysis *AA,
                                        LoopInfo *LI, DominatorTree *DT,
                                        TargetLibraryInfo *TLI,
                                        ScalarEvolution *SE, bool DeleteAST) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  AliasSetTracker *CurAST = collectAliasInfoForLoop(L, LI, AA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  LoopSafetyInfo SafetyInfo;
  computeLoopSafetyInfo(&SafetyInfo, L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  //
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                          CurAST, &SafetyInfo);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                           CurAST, &SafetyInfo);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  if (!DisablePromotion && (Preheader || L->hasDedicatedExits())) {
    SmallVector<BasicBlock *, 8> ExitBlocks;
    SmallVector<Instruction *, 8> InsertPts;
    PredIteratorCache PIC;

    // Loop over all of the alias sets in the tracker object.
    for (AliasSet &AS : *CurAST)
      Changed |= promoteLoopAccessesToScalars(
          AS, ExitBlocks, InsertPts, PIC, LI, DT, TLI, L, CurAST, &SafetyInfo);

    // Once we have promoted values across the loop body we have to recursively
    // reform LCSSA as any nested loop may now have values defined within the
    // loop used in the outer loop.
    // FIXME: This is really heavy handed. It would be a bit better to use an
    // SSAUpdater strategy during promotion that was LCSSA aware and reformed
    // it as it went.
    if (Changed) {
      formLCSSARecursively(*L, *DT, LI, SE);
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (L->getParentLoop() && !DeleteAST)
    LoopToAliasSetMap[L] = CurAST;
  else
    delete CurAST;

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                      AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion");

  BasicBlock *BB = N->getBlock();
  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB))
    return false;

  // We are processing blocks in reverse dfo, so process children first.
  bool Changed = false;
  const std::vector<DomTreeNode *> &Children = N->getChildren();
  for (DomTreeNode *Child : Children)
    Changed |= sinkRegion(Child, AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (inSubLoop(BB, CurLoop, LI))
    return Changed;

  for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
    Instruction &I = *--II;

    // If the instruction is dead, we would try to sink it because it isn't
    // used in the loop; instead, just delete it.
    if (isInstructionTriviallyDead(&I, TLI)) {
      DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
      ++II;
      CurAST->deleteValue(&I);
      I.eraseFromParent();
      Changed = true;
      continue;
    }

    // Check to see if we can sink this instruction to the exit blocks
    // of the loop. We can do this if all of the users of the instruction are
    // outside of the loop. In this case, it doesn't even matter if the
    // operands of the instruction are loop invariant.
    //
    if (isNotUsedInLoop(I, CurLoop, SafetyInfo) &&
        canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo)) {
      ++II;
      Changed |= sink(I, LI, DT, CurLoop, CurAST, SafetyInfo);
    }
  }
  return Changed;
}
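
// A note on why a single reverse pass suffices for sinkRegion: because uses are
// visited before definitions, sinking a user can itself make its operands
// sinkable. For example, if %a is used only by %b and %b is used only outside
// the loop, %b is sunk first; the only use of %a that remains is the LCSSA phi
// created in the exit block for the sunk copy, so %a is no longer used in the
// loop and can be sunk on the same traversal.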

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion");

  BasicBlock *BB = N->getBlock();

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB))
    return false;

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  bool Changed = false;
  if (!inSubLoop(BB, CurLoop, LI))
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to just
      // fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
        CurAST->copyValue(&I, C);
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI)) {
          CurAST->deleteValue(&I);
          I.eraseFromParent();
        }
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do this
      // if all of the operands of the instruction are loop invariant and if it
      // is safe to hoist the instruction.
      //
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo,
              CurLoop->getLoopPreheader()->getTerminator()))
        Changed |= hoist(I, DT, CurLoop, SafetyInfo);
    }

  const std::vector<DomTreeNode *> &Children = N->getChildren();
  for (DomTreeNode *Child : Children)
    Changed |= hoistRegion(Child, AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);
  return Changed;
}
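
// Why the throw-tracking below matters for hoisting: if some instruction in
// the loop header (for example a call that is not guaranteed to return) may
// throw, then instructions after it are not guaranteed to execute on every
// entry to the loop, and executing them unconditionally in the preheader could
// introduce a fault or side effect on a path where the original program never
// reached them.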

/// Computes loop safety information: checks the loop body and header for the
/// possibility that they may throw an exception.
///
void llvm::computeLoopSafetyInfo(LoopSafetyInfo *SafetyInfo, Loop *CurLoop) {
  assert(CurLoop != nullptr && "CurLoop can't be null");
  BasicBlock *Header = CurLoop->getHeader();
  // Setting default safety values.
  SafetyInfo->MayThrow = false;
  SafetyInfo->HeaderMayThrow = false;
  // Iterate over header and compute safety info.
  for (BasicBlock::iterator I = Header->begin(), E = Header->end();
       (I != E) && !SafetyInfo->HeaderMayThrow; ++I)
    SafetyInfo->HeaderMayThrow |=
        !isGuaranteedToTransferExecutionToSuccessor(&*I);

  SafetyInfo->MayThrow = SafetyInfo->HeaderMayThrow;
  // Iterate over loop instructions and compute safety info.
  for (Loop::block_iterator BB = CurLoop->block_begin(),
                            BBE = CurLoop->block_end();
       (BB != BBE) && !SafetyInfo->MayThrow; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
         (I != E) && !SafetyInfo->MayThrow; ++I)
      SafetyInfo->MayThrow |= !isGuaranteedToTransferExecutionToSuccessor(&*I);

  // Compute funclet colors if we might sink/hoist in a function with a funclet
  // personality routine.
  Function *Fn = CurLoop->getHeader()->getParent();
  if (Fn->hasPersonalityFn())
    if (Constant *PersonalityFn = Fn->getPersonalityFn())
      if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
        SafetyInfo->BlockColors = colorEHFunclets(*Fn);
}

/// Returns true if the hoister and sinker can handle this instruction.
/// If SafetyInfo is nullptr, we are checking for sinking instructions from
/// preheader to loop body (no speculation).
/// If SafetyInfo is not nullptr, we are checking for hoisting/sinking
/// instructions from loop body to preheader/exit. Check if the instruction
/// can execute speculatively.
///
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                        Loop *CurLoop, AliasSetTracker *CurAST,
                        LoopSafetyInfo *SafetyInfo) {
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't hoist volatile/atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

    // Don't hoist loads which have may-aliased stores in loop.
    uint64_t Size = 0;
    if (LI->getType()->isSized())
      Size = I.getModule()->getDataLayout().getTypeStoreSize(LI->getType());

    AAMDNodes AAInfo;
    LI->getAAMetadata(AAInfo);

    return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST);
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy() &&
              pointerInvalidatedByLoop(Op, MemoryLocation::UnknownSize,
                                       AAMDNodes(), CurAST))
            return false;
        return true;
      }
      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      bool FoundMod = false;
      for (AliasSet &AS : *CurAST) {
        if (!AS.isForwardingAliasSet() && AS.isMod()) {
          FoundMod = true;
          break;
        }
      }
      if (!FoundMod)
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  }

  // Only these instructions are hoistable/sinkable.
  if (!isa<BinaryOperator>(I) && !isa<CastInst>(I) && !isa<SelectInst>(I) &&
      !isa<GetElementPtrInst>(I) && !isa<CmpInst>(I) &&
      !isa<InsertElementInst>(I) && !isa<ExtractElementInst>(I) &&
      !isa<ShuffleVectorInst>(I) && !isa<ExtractValueInst>(I) &&
      !isa<InsertValueInst>(I))
    return false;

  // SafetyInfo is nullptr if we are checking for sinking from preheader to
  // loop body. It will always be safe as there is no speculative execution.
  if (!SafetyInfo)
    return true;

  // TODO: Plumb the context instruction through to make hoisting and sinking
  // more powerful. Hoisting of loads already works due to the special casing
  // above.
  return isSafeToExecuteUnconditionally(I, DT, CurLoop, SafetyInfo, nullptr);
}

/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplacablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}
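
// Illustrative IR for the case handled above (assumed shape of an LCSSA exit
// PHI, not taken from a test):
//
//   exit:
//     %x.lcssa = phi i32 [ %x, %loop.latch ]
//
// Here the in-loop instruction %x is the only incoming value, so once %x is
// sunk into the exit block the PHI can simply be replaced with the sunk copy.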

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop,
                            const LoopSafetyInfo *SafetyInfo) {
  const auto &BlockColors = SafetyInfo->BlockColors;
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      // A PHI node where all of the incoming values are this instruction is
      // special -- it can just be RAUW'ed with the instruction and thus
      // doesn't require a use in the predecessor. This is a particularly
      // important special case because it is the pattern found in LCSSA form.
      if (isTriviallyReplacablePHI(*PN, I)) {
        if (CurLoop->contains(PN))
          return false;
        else
          continue;
      }

      // Otherwise, PHI node uses occur in the predecessor blocks of the
      // incoming values. Check for such a use being inside the loop.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I)
          if (CurLoop->contains(PN->getIncomingBlock(i)))
            return false;

      continue;
    }

    if (CurLoop->contains(UI))
      return false;
  }
  return true;
}

static Instruction *
CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
                            const LoopInfo *LI,
                            const LoopSafetyInfo *SafetyInfo) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->BlockColors;

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", &ExitBlock.front());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
                 const Loop *CurLoop, AliasSetTracker *CurAST,
                 const LoopSafetyInfo *SafetyInfo) {
  DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  bool Changed = false;
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;
  Changed = true;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  while (!I.use_empty()) {
    Value::user_iterator UI = I.user_begin();
    auto *User = cast<Instruction>(*UI);
    if (!DT->isReachableFromEntry(User->getParent())) {
      User->replaceUsesOfWith(&I, UndefValue::get(I.getType()));
      continue;
    }
    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    Use &U = UI.getUse();
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      continue;
    }

    BasicBlock *ExitBlock = PN->getParent();
    assert(ExitBlockSet.count(ExitBlock) &&
           "The LCSSA PHI is not in an exit block!");

    Instruction *New;
    auto It = SunkCopies.find(ExitBlock);
    if (It != SunkCopies.end())
      New = It->second;
    else
      New = SunkCopies[ExitBlock] =
          CloneInstructionInExitBlock(I, *ExitBlock, *PN, LI, SafetyInfo);

    PN->replaceAllUsesWith(New);
    PN->eraseFromParent();
  }

  CurAST->deleteValue(&I);
  I.eraseFromParent();
  return Changed;
}

/// When an instruction is found to use only loop-invariant operands and it is
/// safe to hoist, this function is called to do the dirty work.
///
static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  const LoopSafetyInfo *SafetyInfo) {
  auto *Preheader = CurLoop->getLoopPreheader();
  DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I
               << "\n");

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  if (I.hasMetadataOtherThanDebugLoc() &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from
      // burning time in isGuaranteedToExecute if we don't actually have
      // anything to drop. It is a compile time optimization, not required for
      // correctness.
      !isGuaranteedToExecute(I, DT, CurLoop, SafetyInfo))
    I.dropUnknownNonDebugMetadata();

  // Move the new node to the Preheader, before its terminator.
  I.moveBefore(Preheader->getTerminator());

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
  return true;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// if it is known not to trap when moved to the preheader, or if it is a
/// trapping instruction that is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
    return true;

  return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo);
}
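
// For example, a division such as
//
//   %q = sdiv i32 %a, %b
//
// cannot be speculated into the preheader unless %b is known to be non-zero,
// but it can still be hoisted when it is guaranteed to execute whenever the
// loop is entered, since no new trap is introduced on any path.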

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  SmallPtrSetImpl<Value *> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker &AST;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  AAMDNodes AATags;

  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (Loop *L = LI.getLoopFor(I->getParent()))
        if (!L->contains(BB)) {
          // We need to create an LCSSA PHI node for the incoming value and
          // store that.
          PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                        I->getName() + ".lcssa", &BB->front());
          for (BasicBlock *Pred : PredCache.get(BB))
            PN->addIncoming(I, Pred);
          return PN;
        }
    return V;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               SmallPtrSetImpl<Value *> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC,
               AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment,
               const AAMDNodes &AATags)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), PredCache(PIC), AST(ast),
        LI(li), DL(std::move(dl)), Alignment(alignment), AATags(AATags) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() const override {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out values that feed it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    AST.copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override { AST.deleteValue(I); }
};
} // end anon namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    AliasSet &AS, SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return false;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");

  Value *SomePtr = AS.begin()->getValue();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // 1) The memory may not be dereferenceable on entry to the loop. In this
  //    case, we can't insert the required load in the preheader.
  // 2) The memory model does not allow us to insert a store along any dynamic
  //    path which did not originally have one.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  // It is also safe to promote P if we can prove that speculating a load into
  // the preheader is safe (i.e. proving dereferenceability on all paths
  // through the loop), and that the memory can be proven thread-local (so
  // that the memory model requirement doesn't apply). We first establish the
  // former, and then run a capture analysis below to establish the latter.
  // We can use any access within the alias set to prove dereferenceability
  // since they're all must alias.
  bool CanSpeculateLoad = false;

  SmallVector<Instruction *, 64> LoopUses;
  SmallPtrSet<Value *, 4> PointerMustAliases;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;
  AAMDNodes AATags;
  bool HasDedicatedExits = CurLoop->hasDedicatedExits();

  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop-simplify;
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!HasDedicatedExits || !Preheader)
    return false;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  if (SafetyInfo->MayThrow) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge.
    //
    // Currently, this code just special-cases alloca instructions.
    if (!isa<AllocaInst>(GetUnderlyingObject(SomePtr, MDL)))
      return false;
  }

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  bool Changed = false;
  for (const auto &ASI : AS) {
    Value *ASIV = ASI.getValue();
    PointerMustAliases.insert(ASIV);

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return Changed;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (const LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        assert(!Load->isVolatile() && "AST broken");
        if (!Load->isSimple())
          return Changed;

        if (!GuaranteedToExecute && !CanSpeculateLoad)
          CanSpeculateLoad = isSafeToExecuteUnconditionally(
              *Load, DT, CurLoop, SafetyInfo, Preheader->getTerminator());
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        assert(!Store->isVolatile() && "AST broken");
        if (!Store->isSimple())
          return Changed;

        // Note that we only check GuaranteedToExecute inside the store case
        // so that we do not introduce stores where they did not exist before
        // (which would break the LLVM concurrency model).

        // If the alignment of this instruction allows us to specify a more
        // restrictive (and performant) alignment and if we are sure this
        // instruction will be executed, update the alignment.
        // Larger is better, with the exception of 0 being the best alignment.
        unsigned InstAlignment = Store->getAlignment();
        if ((InstAlignment > Alignment || InstAlignment == 0) &&
            Alignment != 0) {
          if (isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo)) {
            GuaranteedToExecute = true;
            Alignment = InstAlignment;
          }
        } else if (!GuaranteedToExecute) {
          GuaranteedToExecute =
              isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo);
        }

        if (!GuaranteedToExecute && !CanSpeculateLoad) {
          CanSpeculateLoad = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getAlignment(), MDL,
              Preheader->getTerminator(), DT);
        }
      } else
        return Changed; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }

  // Check legality per comment above. Otherwise, we can't promote.
  bool PromotionIsLegal = GuaranteedToExecute;
  if (!PromotionIsLegal && CanSpeculateLoad) {
    // If this is a thread-local location, then we can insert stores along
    // paths which originally didn't have them without violating the memory
    // model.
    Value *Object = GetUnderlyingObject(SomePtr, MDL);
    PromotionIsLegal =
        isAllocLikeFn(Object, TLI) && !PointerMayBeCaptured(Object, true, true);
  }
  if (!PromotionIsLegal)
    return Changed;

  // Figure out the loop exits and their insertion points, if this is the
  // first promotion.
  if (ExitBlocks.empty()) {
    CurLoop->getUniqueExitBlocks(ExitBlocks);
    InsertPts.clear();
    InsertPts.reserve(ExitBlocks.size());
    for (BasicBlock *ExitBlock : ExitBlocks)
      InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
  }

  // Can't insert into a catchswitch.
  for (BasicBlock *ExitBlock : ExitBlocks)
    if (isa<CatchSwitchInst>(ExitBlock->getTerminator()))
      return Changed;

  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
               << '\n');
  Changed = true;
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, PIC, *CurAST, *LI, DL, Alignment, AATags);

  // Set up the preheader to have a definition of the value. It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator());
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DL);
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    PreheaderLoad->eraseFromParent();

  return Changed;
}
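
// Illustrative sketch of the promotion above (assumed IR, not a real test):
// for a must-alias set over a loop-invariant pointer %p whose only in-loop
// uses are simple loads and stores, e.g.
//
//   loop:
//     %v   = load i32, i32* %p
//     %inc = add i32 %v, 1
//     store i32 %inc, i32* %p
//
// the value of %p is loaded once in the preheader, the in-loop load is
// rewritten to use the value carried around the loop in a register, and a
// single store of the live-out value is emitted in each exit block.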

/// Returns an owning pointer to an alias set which incorporates aliasing info
/// from L and all subloops of L.
/// FIXME: In the new pass manager, there is no helper function to handle loop
/// analysis such as cloneBasicBlockAnalysis, so the AST needs to be recomputed
/// from scratch for every loop. Hook up with the helper functions when
/// available in the new pass manager to avoid redundant computation.
AliasSetTracker *
LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                                 AliasAnalysis *AA) {
  AliasSetTracker *CurAST = nullptr;
  SmallVector<Loop *, 4> RecomputeLoops;
  for (Loop *InnerL : L->getSubLoops()) {
    auto MapI = LoopToAliasSetMap.find(InnerL);
    // If the AST for this inner loop is missing, it may have been merged into
    // some other loop's AST and then that loop unrolled, and so we need to
    // recompute it.
    if (MapI == LoopToAliasSetMap.end()) {
      RecomputeLoops.push_back(InnerL);
      continue;
    }
    AliasSetTracker *InnerAST = MapI->second;

    if (CurAST != nullptr) {
      // What if InnerLoop was modified by other passes?
      CurAST->add(*InnerAST);

      // Once we've incorporated the inner loop's AST into ours, we don't need
      // the subloop's anymore.
      delete InnerAST;
    } else {
      CurAST = InnerAST;
    }
    LoopToAliasSetMap.erase(MapI);
  }
  if (CurAST == nullptr)
    CurAST = new AliasSetTracker(*AA);

  auto mergeLoop = [&](Loop *L) {
    // Loop over the body of this loop, looking for calls, invokes, and stores.
    // Because subloops have already been incorporated into the AST, we skip
    // blocks in subloops.
    for (BasicBlock *BB : L->blocks())
      if (LI->getLoopFor(BB) == L) // Ignore blocks in subloops.
        CurAST->add(*BB);          // Incorporate the specified basic block.
  };

  // Add everything from the sub loops that are no longer directly available.
  for (Loop *InnerL : RecomputeLoops)
    mergeLoop(InnerL);

  // And merge in this loop.
  mergeLoop(L);

  return CurAST;
}

/// Simple analysis hook. Clone alias set info.
///
void LegacyLICMPass::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                                             Loop *L) {
  AliasSetTracker *AST = LICM.getLoopToAliasSetMap().lookup(L);
  if (!AST)
    return;

  AST->copyValue(From, To);
}

/// Simple Analysis hook. Delete value V from alias set.
///
void LegacyLICMPass::deleteAnalysisValue(Value *V, Loop *L) {
  AliasSetTracker *AST = LICM.getLoopToAliasSetMap().lookup(L);
  if (!AST)
    return;

  AST->deleteValue(V);
}

/// Simple Analysis hook. Delete loop L from the alias set map.
///
void LegacyLICMPass::deleteAnalysisLoop(Loop *L) {
  AliasSetTracker *AST = LICM.getLoopToAliasSetMap().lookup(L);
  if (!AST)
    return;

  delete AST;
  LICM.getLoopToAliasSetMap().erase(L);
}

/// Return true if the body of this loop may store into the memory
/// location pointed to by V.
///
static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
                                     const AAMDNodes &AAInfo,
                                     AliasSetTracker *CurAST) {
  // Check to see if any of the basic blocks in CurLoop invalidate *V.
  return CurAST->getAliasSetForPointer(V, Size, AAInfo).isMod();
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}