1 //===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs loop invariant code motion, attempting to remove as much
10 // code from the body of a loop as possible.  It does this by either hoisting
11 // code into the preheader block, or by sinking code to the exit blocks if it is
12 // safe.  This pass also promotes must-aliased memory locations in the loop to
13 // live in registers, thus hoisting and sinking "invariant" loads and stores.
14 //
15 // Hoisting operations out of loops is a canonicalization transform.  It
16 // enables and simplifies subsequent optimizations in the middle-end.
17 // Rematerialization of hoisted instructions to reduce register pressure is the
18 // responsibility of the back-end, which has more accurate information about
19 // register pressure and also handles other optimizations than LICM that
20 // increase live-ranges.
21 //
22 // This pass uses alias analysis for two purposes:
23 //
24 //  1. Moving loop invariant loads and calls out of loops.  If we can determine
25 //     that a load or call inside of a loop never aliases anything stored to,
26 //     we can hoist it or sink it like any other instruction.
27 //  2. Scalar Promotion of Memory - If there is a store instruction inside of
28 //     the loop, we try to move the store to happen AFTER the loop instead of
29 //     inside of the loop.  This can only happen if a few conditions are true:
30 //       A. The pointer stored through is loop invariant
31 //       B. There are no stores or loads in the loop which _may_ alias the
32 //          pointer.  There are no calls in the loop which mod/ref the pointer.
33 //     If these conditions are true, we can promote the loads and stores in the
34 //     loop of the pointer to use a temporary alloca'd variable.  We then use
35 //     the SSAUpdater to construct the appropriate SSA form for the value.
36 //
37 //===----------------------------------------------------------------------===//
38 
39 #include "llvm/Transforms/Scalar/LICM.h"
40 #include "llvm/ADT/SetOperations.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/Analysis/AliasAnalysis.h"
43 #include "llvm/Analysis/AliasSetTracker.h"
44 #include "llvm/Analysis/BasicAliasAnalysis.h"
45 #include "llvm/Analysis/BlockFrequencyInfo.h"
46 #include "llvm/Analysis/CaptureTracking.h"
47 #include "llvm/Analysis/ConstantFolding.h"
48 #include "llvm/Analysis/GlobalsModRef.h"
49 #include "llvm/Analysis/GuardUtils.h"
50 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51 #include "llvm/Analysis/Loads.h"
52 #include "llvm/Analysis/LoopInfo.h"
53 #include "llvm/Analysis/LoopIterator.h"
54 #include "llvm/Analysis/LoopPass.h"
55 #include "llvm/Analysis/MemoryBuiltins.h"
56 #include "llvm/Analysis/MemorySSA.h"
57 #include "llvm/Analysis/MemorySSAUpdater.h"
58 #include "llvm/Analysis/MustExecute.h"
59 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
60 #include "llvm/Analysis/ScalarEvolution.h"
61 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
62 #include "llvm/Analysis/TargetLibraryInfo.h"
63 #include "llvm/Analysis/ValueTracking.h"
64 #include "llvm/IR/CFG.h"
65 #include "llvm/IR/Constants.h"
66 #include "llvm/IR/DataLayout.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/Dominators.h"
70 #include "llvm/IR/Instructions.h"
71 #include "llvm/IR/IntrinsicInst.h"
72 #include "llvm/IR/LLVMContext.h"
73 #include "llvm/IR/Metadata.h"
74 #include "llvm/IR/PatternMatch.h"
75 #include "llvm/IR/PredIteratorCache.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Scalar/LoopPassManager.h"
82 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
83 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
84 #include "llvm/Transforms/Utils/Local.h"
85 #include "llvm/Transforms/Utils/LoopUtils.h"
86 #include "llvm/Transforms/Utils/SSAUpdater.h"
87 #include <algorithm>
88 #include <utility>
89 using namespace llvm;
90 
91 #define DEBUG_TYPE "licm"
92 
93 STATISTIC(NumCreatedBlocks, "Number of blocks created");
94 STATISTIC(NumClonedBranches, "Number of branches cloned");
95 STATISTIC(NumSunk, "Number of instructions sunk out of loop");
96 STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
97 STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
98 STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
99 STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
100 
101 /// Memory promotion is enabled by default.
102 static cl::opt<bool>
103     DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
104                      cl::desc("Disable memory promotion in LICM pass"));
105 
106 static cl::opt<bool> ControlFlowHoisting(
107     "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
108     cl::desc("Enable control flow (and PHI) hoisting in LICM"));
109 
110 static cl::opt<unsigned> HoistSinkColdnessThreshold(
111     "licm-coldness-threshold", cl::Hidden, cl::init(4),
112     cl::desc("Relative coldness Threshold of hoisting/sinking destination "
113              "block for LICM to be considered beneficial"));
114 
115 static cl::opt<uint32_t> MaxNumUsesTraversed(
116     "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
117     cl::desc("Max num uses visited for identifying load "
118              "invariance in loop using invariant start (default = 8)"));
119 
120 // Default value of zero implies we use the regular alias set tracker mechanism
121 // instead of the cross product using AA to identify aliasing of the memory
122 // location we are interested in.
123 static cl::opt<int>
124 LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
125                cl::desc("How many instruction to cross product using AA"));
126 
127 // Experimental option to allow imprecision in LICM in pathological cases, in
128 // exchange for faster compile. This is to be removed if MemorySSA starts to
129 // address the same issue. This flag applies only when LICM uses MemorySSA
130 // instead on AliasSetTracker. LICM calls MemorySSAWalker's
131 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect
132 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
133 // which may not be precise, since optimizeUses is capped. The result is
134 // correct, but we may not get as "far up" as possible to get which access is
135 // clobbering the one queried.
136 cl::opt<unsigned> llvm::SetLicmMssaOptCap(
137     "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
138     cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
139              "for faster compile. Caps the MemorySSA clobbering calls."));
140 
141 // Experimentally, memory promotion carries less importance than sinking and
142 // hoisting. Limit when we do promotion when using MemorySSA, in order to save
143 // compile time.
144 cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
145     "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
146     cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
147              "effect. When MSSA in LICM is enabled, then this is the maximum "
148              "number of accesses allowed to be present in a loop in order to "
149              "enable memory promotion."));
150 
151 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
152 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
153                                   const LoopSafetyInfo *SafetyInfo,
154                                   TargetTransformInfo *TTI, bool &FreeInLoop);
155 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
156                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
157                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
158                   OptimizationRemarkEmitter *ORE);
159 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
160                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
161                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
162                  OptimizationRemarkEmitter *ORE);
163 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
164                                            const DominatorTree *DT,
165                                            const Loop *CurLoop,
166                                            const LoopSafetyInfo *SafetyInfo,
167                                            OptimizationRemarkEmitter *ORE,
168                                            const Instruction *CtxI = nullptr);
169 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
170                                      AliasSetTracker *CurAST, Loop *CurLoop,
171                                      AAResults *AA);
172 static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
173                                              Loop *CurLoop, Instruction &I,
174                                              SinkAndHoistLICMFlags &Flags);
175 static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
176                                               MemoryUse &MU);
177 static Instruction *cloneInstructionInExitBlock(
178     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
179     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
180 
181 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
182                              AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
183 
184 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
185                                   ICFLoopSafetyInfo &SafetyInfo,
186                                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
187 
188 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
189                                 function_ref<void(Instruction *)> Fn);
190 static SmallVector<SmallSetVector<Value *, 8>, 0>
191 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L,
192                            SmallVectorImpl<Instruction *> &MaybePromotable);
193 
194 namespace {
195 struct LoopInvariantCodeMotion {
196   bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
197                  BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
198                  TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
199                  OptimizationRemarkEmitter *ORE);
200 
201   LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
202                           unsigned LicmMssaNoAccForPromotionCap)
203       : LicmMssaOptCap(LicmMssaOptCap),
204         LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
205 
206 private:
207   unsigned LicmMssaOptCap;
208   unsigned LicmMssaNoAccForPromotionCap;
209 
210   std::unique_ptr<AliasSetTracker>
211   collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
212 };
213 
214 struct LegacyLICMPass : public LoopPass {
215   static char ID; // Pass identification, replacement for typeid
216   LegacyLICMPass(
217       unsigned LicmMssaOptCap = SetLicmMssaOptCap,
218       unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
219       : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
220     initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
221   }
222 
223   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
224     if (skipLoop(L))
225       return false;
226 
227     LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
228                       << L->getHeader()->getNameOrAsOperand() << "\n");
229 
230     auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
231     MemorySSA *MSSA = EnableMSSALoopDependency
232                           ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
233                           : nullptr;
234     bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
235     BlockFrequencyInfo *BFI =
236         hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
237                        : nullptr;
238     // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
239     // pass. Function analyses need to be preserved across loop transformations
240     // but ORE cannot be preserved (see comment before the pass definition).
241     OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
242     return LICM.runOnLoop(
243         L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
244         &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
245         &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
246         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
247             *L->getHeader()->getParent()),
248         &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
249             *L->getHeader()->getParent()),
250         SE ? &SE->getSE() : nullptr, MSSA, &ORE);
251   }
252 
253   /// This transformation requires natural loop information & requires that
254   /// loop preheaders be inserted into the CFG...
255   ///
256   void getAnalysisUsage(AnalysisUsage &AU) const override {
257     AU.addPreserved<DominatorTreeWrapperPass>();
258     AU.addPreserved<LoopInfoWrapperPass>();
259     AU.addRequired<TargetLibraryInfoWrapperPass>();
260     if (EnableMSSALoopDependency) {
261       AU.addRequired<MemorySSAWrapperPass>();
262       AU.addPreserved<MemorySSAWrapperPass>();
263     }
264     AU.addRequired<TargetTransformInfoWrapperPass>();
265     getLoopAnalysisUsage(AU);
266     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
267     AU.addPreserved<LazyBlockFrequencyInfoPass>();
268     AU.addPreserved<LazyBranchProbabilityInfoPass>();
269   }
270 
271 private:
272   LoopInvariantCodeMotion LICM;
273 };
274 } // namespace
275 
276 PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
277                                 LoopStandardAnalysisResults &AR, LPMUpdater &) {
278   // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
279   // pass.  Function analyses need to be preserved across loop transformations
280   // but ORE cannot be preserved (see comment before the pass definition).
281   OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
282 
283   LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
284   if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
285                       &AR.SE, AR.MSSA, &ORE))
286     return PreservedAnalyses::all();
287 
288   auto PA = getLoopPassPreservedAnalyses();
289 
290   PA.preserve<DominatorTreeAnalysis>();
291   PA.preserve<LoopAnalysis>();
292   if (AR.MSSA)
293     PA.preserve<MemorySSAAnalysis>();
294 
295   return PA;
296 }
297 
298 char LegacyLICMPass::ID = 0;
299 INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
300                       false, false)
301 INITIALIZE_PASS_DEPENDENCY(LoopPass)
302 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
303 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
304 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
305 INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
306 INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
307                     false)
308 
309 Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
310 Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
311                            unsigned LicmMssaNoAccForPromotionCap) {
312   return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
313 }
314 
315 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
316                                                    MemorySSA *MSSA)
317     : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
318                             IsSink, L, MSSA) {}
319 
320 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
321     unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
322     Loop *L, MemorySSA *MSSA)
323     : LicmMssaOptCap(LicmMssaOptCap),
324       LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
325       IsSink(IsSink) {
326   assert(((L != nullptr) == (MSSA != nullptr)) &&
327          "Unexpected values for SinkAndHoistLICMFlags");
328   if (!MSSA)
329     return;
330 
331   unsigned AccessCapCount = 0;
332   for (auto *BB : L->getBlocks())
333     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
334       for (const auto &MA : *Accesses) {
335         (void)MA;
336         ++AccessCapCount;
337         if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
338           NoOfMemAccTooLarge = true;
339           return;
340         }
341       }
342 }
343 
344 /// Hoist expressions out of the specified loop. Note, alias info for inner
345 /// loop is not preserved so it is not a good idea to run LICM multiple
346 /// times on one loop.
347 bool LoopInvariantCodeMotion::runOnLoop(
348     Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
349     BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
350     ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
351   bool Changed = false;
352 
353   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
354 
355   // If this loop has metadata indicating that LICM is not to be performed then
356   // just exit.
357   if (hasDisableLICMTransformsHint(L)) {
358     return false;
359   }
360 
361   std::unique_ptr<AliasSetTracker> CurAST;
362   std::unique_ptr<MemorySSAUpdater> MSSAU;
363   std::unique_ptr<SinkAndHoistLICMFlags> Flags;
364 
365   // Don't sink stores from loops with coroutine suspend instructions.
366   // LICM would sink instructions into the default destination of
367   // the coroutine switch. The default destination of the switch is to
368   // handle the case where the coroutine is suspended, by which point the
369   // coroutine frame may have been destroyed. No instruction can be sunk there.
370   // FIXME: This would unfortunately hurt the performance of coroutines, however
371   // there is currently no general solution for this. Similar issues could also
372   // potentially happen in other passes where instructions are being moved
373   // across that edge.
374   bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
375     return llvm::any_of(*BB, [](Instruction &I) {
376       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
377       return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
378     });
379   });
380 
381   if (!MSSA) {
382     LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
383     CurAST = collectAliasInfoForLoop(L, LI, AA);
384     Flags = std::make_unique<SinkAndHoistLICMFlags>(
385         LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true);
386   } else {
387     LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
388     MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
389     Flags = std::make_unique<SinkAndHoistLICMFlags>(
390         LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true, L, MSSA);
391   }
392 
393   // Get the preheader block to move instructions into...
394   BasicBlock *Preheader = L->getLoopPreheader();
395 
396   // Compute loop safety information.
397   ICFLoopSafetyInfo SafetyInfo;
398   SafetyInfo.computeLoopSafetyInfo(L);
399 
400   // We want to visit all of the instructions in this loop... that are not parts
401   // of our subloops (they have already had their invariants hoisted out of
402   // their loop, into this loop, so there is no need to process the BODIES of
403   // the subloops).
404   //
405   // Traverse the body of the loop in depth first order on the dominator tree so
406   // that we are guaranteed to see definitions before we see uses.  This allows
407   // us to sink instructions in one pass, without iteration.  After sinking
408   // instructions, we perform another pass to hoist them out of the loop.
409   if (L->hasDedicatedExits())
410     Changed |=
411         sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
412                    CurAST.get(), MSSAU.get(), &SafetyInfo, *Flags.get(), ORE);
413   Flags->setIsSink(false);
414   if (Preheader)
415     Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
416                            CurAST.get(), MSSAU.get(), SE, &SafetyInfo,
417                            *Flags.get(), ORE);
418 
419   // Now that all loop invariants have been removed from the loop, promote any
420   // memory references to scalars that we can.
421   // Don't sink stores from loops without dedicated block exits. Exits
422   // containing indirect branches are not transformed by loop simplify,
423   // make sure we catch that. An additional load may be generated in the
424   // preheader for SSA updater, so also avoid sinking when no preheader
425   // is available.
426   if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
427       !Flags->tooManyMemoryAccesses() && !HasCoroSuspendInst) {
428     // Figure out the loop exits and their insertion points
429     SmallVector<BasicBlock *, 8> ExitBlocks;
430     L->getUniqueExitBlocks(ExitBlocks);
431 
432     // We can't insert into a catchswitch.
433     bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
434       return isa<CatchSwitchInst>(Exit->getTerminator());
435     });
436 
437     if (!HasCatchSwitch) {
438       SmallVector<Instruction *, 8> InsertPts;
439       SmallVector<MemoryAccess *, 8> MSSAInsertPts;
440       InsertPts.reserve(ExitBlocks.size());
441       if (MSSAU)
442         MSSAInsertPts.reserve(ExitBlocks.size());
443       for (BasicBlock *ExitBlock : ExitBlocks) {
444         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
445         if (MSSAU)
446           MSSAInsertPts.push_back(nullptr);
447       }
448 
449       PredIteratorCache PIC;
450 
451       bool Promoted = false;
452       if (CurAST.get()) {
453         // Loop over all of the alias sets in the tracker object.
454         for (AliasSet &AS : *CurAST) {
455           // We can promote this alias set if it has a store, if it is a "Must"
456           // alias set, if the pointer is loop invariant, and if we are not
457           // eliminating any volatile loads or stores.
458           if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
459               !L->isLoopInvariant(AS.begin()->getValue()))
460             continue;
461 
462           assert(
463               !AS.empty() &&
464               "Must alias set should have at least one pointer element in it!");
465 
466           SmallSetVector<Value *, 8> PointerMustAliases;
467           for (const auto &ASI : AS)
468             PointerMustAliases.insert(ASI.getValue());
469 
470           Promoted |= promoteLoopAccessesToScalars(
471               PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
472               DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
473         }
474       } else {
475         SmallVector<Instruction *, 16> MaybePromotable;
476         foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
477           MaybePromotable.push_back(I);
478         });
479 
480         // Promoting one set of accesses may make the pointers for another set
481         // loop invariant, so run this in a loop (with the MaybePromotable set
482         // decreasing in size over time).
483         bool LocalPromoted;
484         do {
485           LocalPromoted = false;
486           for (const SmallSetVector<Value *, 8> &PointerMustAliases :
487                collectPromotionCandidates(MSSA, AA, L, MaybePromotable)) {
488             LocalPromoted |= promoteLoopAccessesToScalars(
489                 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
490                 LI, DT, TLI, L, /*AST*/nullptr, MSSAU.get(), &SafetyInfo, ORE);
491           }
492           Promoted |= LocalPromoted;
493         } while (LocalPromoted);
494       }
495 
496       // Once we have promoted values across the loop body we have to
497       // recursively reform LCSSA as any nested loop may now have values defined
498       // within the loop used in the outer loop.
499       // FIXME: This is really heavy handed. It would be a bit better to use an
500       // SSAUpdater strategy during promotion that was LCSSA aware and reformed
501       // it as it went.
502       if (Promoted)
503         formLCSSARecursively(*L, *DT, LI, SE);
504 
505       Changed |= Promoted;
506     }
507   }
508 
509   // Check that neither this loop nor its parent have had LCSSA broken. LICM is
510   // specifically moving instructions across the loop boundary and so it is
511   // especially in need of sanity checking here.
512   assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
513   assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
514          "Parent loop not left in LCSSA form after LICM!");
515 
516   if (MSSAU.get() && VerifyMemorySSA)
517     MSSAU->getMemorySSA()->verifyMemorySSA();
518 
519   if (Changed && SE)
520     SE->forgetLoopDispositions(L);
521   return Changed;
522 }
523 
524 /// Walk the specified region of the CFG (defined by all blocks dominated by
525 /// the specified block, and that are in the current loop) in reverse depth
526 /// first order w.r.t the DominatorTree.  This allows us to visit uses before
527 /// definitions, allowing us to sink a loop body in one pass without iteration.
528 ///
529 bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
530                       DominatorTree *DT, BlockFrequencyInfo *BFI,
531                       TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
532                       Loop *CurLoop, AliasSetTracker *CurAST,
533                       MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
534                       SinkAndHoistLICMFlags &Flags,
535                       OptimizationRemarkEmitter *ORE) {
536 
537   // Verify inputs.
538   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
539          CurLoop != nullptr && SafetyInfo != nullptr &&
540          "Unexpected input to sinkRegion.");
541   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
542          "Either AliasSetTracker or MemorySSA should be initialized.");
543 
544   // We want to visit children before parents. We will enque all the parents
545   // before their children in the worklist and process the worklist in reverse
546   // order.
547   SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
548 
549   bool Changed = false;
550   for (DomTreeNode *DTN : reverse(Worklist)) {
551     BasicBlock *BB = DTN->getBlock();
552     // Only need to process the contents of this block if it is not part of a
553     // subloop (which would already have been processed).
554     if (inSubLoop(BB, CurLoop, LI))
555       continue;
556 
557     for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
558       Instruction &I = *--II;
559 
560       // If the instruction is dead, we would try to sink it because it isn't
561       // used in the loop, instead, just delete it.
562       if (isInstructionTriviallyDead(&I, TLI)) {
563         LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
564         salvageKnowledge(&I);
565         salvageDebugInfo(I);
566         ++II;
567         eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
568         Changed = true;
569         continue;
570       }
571 
572       // Check to see if we can sink this instruction to the exit blocks
573       // of the loop.  We can do this if the all users of the instruction are
574       // outside of the loop.  In this case, it doesn't even matter if the
575       // operands of the instruction are loop invariant.
576       //
577       bool FreeInLoop = false;
578       if (!I.mayHaveSideEffects() &&
579           isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
580           canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
581                              ORE)) {
582         if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
583           if (!FreeInLoop) {
584             ++II;
585             salvageDebugInfo(I);
586             eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
587           }
588           Changed = true;
589         }
590       }
591     }
592   }
593   if (MSSAU && VerifyMemorySSA)
594     MSSAU->getMemorySSA()->verifyMemorySSA();
595   return Changed;
596 }
597 
598 namespace {
599 // This is a helper class for hoistRegion to make it able to hoist control flow
600 // in order to be able to hoist phis. The way this works is that we initially
601 // start hoisting to the loop preheader, and when we see a loop invariant branch
602 // we make note of this. When we then come to hoist an instruction that's
603 // conditional on such a branch we duplicate the branch and the relevant control
604 // flow, then hoist the instruction into the block corresponding to its original
605 // block in the duplicated control flow.
606 class ControlFlowHoister {
607 private:
608   // Information about the loop we are hoisting from
609   LoopInfo *LI;
610   DominatorTree *DT;
611   Loop *CurLoop;
612   MemorySSAUpdater *MSSAU;
613 
614   // A map of blocks in the loop to the block their instructions will be hoisted
615   // to.
616   DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
617 
618   // The branches that we can hoist, mapped to the block that marks a
619   // convergence point of their control flow.
620   DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
621 
622 public:
623   ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
624                      MemorySSAUpdater *MSSAU)
625       : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
626 
627   void registerPossiblyHoistableBranch(BranchInst *BI) {
628     // We can only hoist conditional branches with loop invariant operands.
629     if (!ControlFlowHoisting || !BI->isConditional() ||
630         !CurLoop->hasLoopInvariantOperands(BI))
631       return;
632 
633     // The branch destinations need to be in the loop, and we don't gain
634     // anything by duplicating conditional branches with duplicate successors,
635     // as it's essentially the same as an unconditional branch.
636     BasicBlock *TrueDest = BI->getSuccessor(0);
637     BasicBlock *FalseDest = BI->getSuccessor(1);
638     if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
639         TrueDest == FalseDest)
640       return;
641 
642     // We can hoist BI if one branch destination is the successor of the other,
643     // or both have common successor which we check by seeing if the
644     // intersection of their successors is non-empty.
645     // TODO: This could be expanded to allowing branches where both ends
646     // eventually converge to a single block.
647     SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
648     TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
649     FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
650     BasicBlock *CommonSucc = nullptr;
651     if (TrueDestSucc.count(FalseDest)) {
652       CommonSucc = FalseDest;
653     } else if (FalseDestSucc.count(TrueDest)) {
654       CommonSucc = TrueDest;
655     } else {
656       set_intersect(TrueDestSucc, FalseDestSucc);
657       // If there's one common successor use that.
658       if (TrueDestSucc.size() == 1)
659         CommonSucc = *TrueDestSucc.begin();
660       // If there's more than one pick whichever appears first in the block list
661       // (we can't use the value returned by TrueDestSucc.begin() as it's
662       // unpredicatable which element gets returned).
663       else if (!TrueDestSucc.empty()) {
664         Function *F = TrueDest->getParent();
665         auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
666         auto It = llvm::find_if(*F, IsSucc);
667         assert(It != F->end() && "Could not find successor in function");
668         CommonSucc = &*It;
669       }
670     }
671     // The common successor has to be dominated by the branch, as otherwise
672     // there will be some other path to the successor that will not be
673     // controlled by this branch so any phi we hoist would be controlled by the
674     // wrong condition. This also takes care of avoiding hoisting of loop back
675     // edges.
676     // TODO: In some cases this could be relaxed if the successor is dominated
677     // by another block that's been hoisted and we can guarantee that the
678     // control flow has been replicated exactly.
679     if (CommonSucc && DT->dominates(BI, CommonSucc))
680       HoistableBranches[BI] = CommonSucc;
681   }
682 
683   bool canHoistPHI(PHINode *PN) {
684     // The phi must have loop invariant operands.
685     if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
686       return false;
687     // We can hoist phis if the block they are in is the target of hoistable
688     // branches which cover all of the predecessors of the block.
689     SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
690     BasicBlock *BB = PN->getParent();
691     for (BasicBlock *PredBB : predecessors(BB))
692       PredecessorBlocks.insert(PredBB);
693     // If we have less predecessor blocks than predecessors then the phi will
694     // have more than one incoming value for the same block which we can't
695     // handle.
696     // TODO: This could be handled be erasing some of the duplicate incoming
697     // values.
698     if (PredecessorBlocks.size() != pred_size(BB))
699       return false;
700     for (auto &Pair : HoistableBranches) {
701       if (Pair.second == BB) {
702         // Which blocks are predecessors via this branch depends on if the
703         // branch is triangle-like or diamond-like.
704         if (Pair.first->getSuccessor(0) == BB) {
705           PredecessorBlocks.erase(Pair.first->getParent());
706           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
707         } else if (Pair.first->getSuccessor(1) == BB) {
708           PredecessorBlocks.erase(Pair.first->getParent());
709           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
710         } else {
711           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
712           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
713         }
714       }
715     }
716     // PredecessorBlocks will now be empty if for every predecessor of BB we
717     // found a hoistable branch source.
718     return PredecessorBlocks.empty();
719   }
720 
721   BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
722     if (!ControlFlowHoisting)
723       return CurLoop->getLoopPreheader();
724     // If BB has already been hoisted, return that
725     if (HoistDestinationMap.count(BB))
726       return HoistDestinationMap[BB];
727 
728     // Check if this block is conditional based on a pending branch
729     auto HasBBAsSuccessor =
730         [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
731           return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
732                                        Pair.first->getSuccessor(1) == BB);
733         };
734     auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
735 
736     // If not involved in a pending branch, hoist to preheader
737     BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
738     if (It == HoistableBranches.end()) {
739       LLVM_DEBUG(dbgs() << "LICM using "
740                         << InitialPreheader->getNameOrAsOperand()
741                         << " as hoist destination for "
742                         << BB->getNameOrAsOperand() << "\n");
743       HoistDestinationMap[BB] = InitialPreheader;
744       return InitialPreheader;
745     }
746     BranchInst *BI = It->first;
747     assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
748                HoistableBranches.end() &&
749            "BB is expected to be the target of at most one branch");
750 
751     LLVMContext &C = BB->getContext();
752     BasicBlock *TrueDest = BI->getSuccessor(0);
753     BasicBlock *FalseDest = BI->getSuccessor(1);
754     BasicBlock *CommonSucc = HoistableBranches[BI];
755     BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
756 
757     // Create hoisted versions of blocks that currently don't have them
758     auto CreateHoistedBlock = [&](BasicBlock *Orig) {
759       if (HoistDestinationMap.count(Orig))
760         return HoistDestinationMap[Orig];
761       BasicBlock *New =
762           BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
763       HoistDestinationMap[Orig] = New;
764       DT->addNewBlock(New, HoistTarget);
765       if (CurLoop->getParentLoop())
766         CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
767       ++NumCreatedBlocks;
768       LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
769                         << " as hoist destination for " << Orig->getName()
770                         << "\n");
771       return New;
772     };
773     BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
774     BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
775     BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
776 
777     // Link up these blocks with branches.
778     if (!HoistCommonSucc->getTerminator()) {
779       // The new common successor we've generated will branch to whatever that
780       // hoist target branched to.
781       BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
782       assert(TargetSucc && "Expected hoist target to have a single successor");
783       HoistCommonSucc->moveBefore(TargetSucc);
784       BranchInst::Create(TargetSucc, HoistCommonSucc);
785     }
786     if (!HoistTrueDest->getTerminator()) {
787       HoistTrueDest->moveBefore(HoistCommonSucc);
788       BranchInst::Create(HoistCommonSucc, HoistTrueDest);
789     }
790     if (!HoistFalseDest->getTerminator()) {
791       HoistFalseDest->moveBefore(HoistCommonSucc);
792       BranchInst::Create(HoistCommonSucc, HoistFalseDest);
793     }
794 
795     // If BI is being cloned to what was originally the preheader then
796     // HoistCommonSucc will now be the new preheader.
797     if (HoistTarget == InitialPreheader) {
798       // Phis in the loop header now need to use the new preheader.
799       InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
800       if (MSSAU)
801         MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
802             HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
803       // The new preheader dominates the loop header.
804       DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
805       DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
806       DT->changeImmediateDominator(HeaderNode, PreheaderNode);
807       // The preheader hoist destination is now the new preheader, with the
808       // exception of the hoist destination of this branch.
809       for (auto &Pair : HoistDestinationMap)
810         if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
811           Pair.second = HoistCommonSucc;
812     }
813 
814     // Now finally clone BI.
815     ReplaceInstWithInst(
816         HoistTarget->getTerminator(),
817         BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
818     ++NumClonedBranches;
819 
820     assert(CurLoop->getLoopPreheader() &&
821            "Hoisting blocks should not have destroyed preheader");
822     return HoistDestinationMap[BB];
823   }
824 };
825 } // namespace
826 
827 // Hoisting/sinking instruction out of a loop isn't always beneficial. It's only
828 // only worthwhile if the destination block is actually colder than current
829 // block.
830 static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
831                                  OptimizationRemarkEmitter *ORE,
832                                  BlockFrequencyInfo *BFI) {
833   // Check block frequency only when runtime profile is available
834   // to avoid pathological cases. With static profile, lean towards
835   // hosting because it helps canonicalize the loop for vectorizer.
836   if (!DstBlock->getParent()->hasProfileData())
837     return true;
838 
839   if (!HoistSinkColdnessThreshold || !BFI)
840     return true;
841 
842   BasicBlock *SrcBlock = I.getParent();
843   if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
844       BFI->getBlockFreq(SrcBlock).getFrequency()) {
845     ORE->emit([&]() {
846       return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
847              << "failed to sink or hoist instruction because containing block "
848                 "has lower frequency than destination block";
849     });
850     return false;
851   }
852 
853   return true;
854 }
855 
856 /// Walk the specified region of the CFG (defined by all blocks dominated by
857 /// the specified block, and that are in the current loop) in depth first
858 /// order w.r.t the DominatorTree.  This allows us to visit definitions before
859 /// uses, allowing us to hoist a loop body in one pass without iteration.
860 ///
861 bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
862                        DominatorTree *DT, BlockFrequencyInfo *BFI,
863                        TargetLibraryInfo *TLI, Loop *CurLoop,
864                        AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
865                        ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
866                        SinkAndHoistLICMFlags &Flags,
867                        OptimizationRemarkEmitter *ORE) {
868   // Verify inputs.
869   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
870          CurLoop != nullptr && SafetyInfo != nullptr &&
871          "Unexpected input to hoistRegion.");
872   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
873          "Either AliasSetTracker or MemorySSA should be initialized.");
874 
875   ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
876 
877   // Keep track of instructions that have been hoisted, as they may need to be
878   // re-hoisted if they end up not dominating all of their uses.
879   SmallVector<Instruction *, 16> HoistedInstructions;
880 
881   // For PHI hoisting to work we need to hoist blocks before their successors.
882   // We can do this by iterating through the blocks in the loop in reverse
883   // post-order.
884   LoopBlocksRPO Worklist(CurLoop);
885   Worklist.perform(LI);
886   bool Changed = false;
887   for (BasicBlock *BB : Worklist) {
888     // Only need to process the contents of this block if it is not part of a
889     // subloop (which would already have been processed).
890     if (inSubLoop(BB, CurLoop, LI))
891       continue;
892 
893     for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
894       Instruction &I = *II++;
895       // Try constant folding this instruction.  If all the operands are
896       // constants, it is technically hoistable, but it would be better to
897       // just fold it.
898       if (Constant *C = ConstantFoldInstruction(
899               &I, I.getModule()->getDataLayout(), TLI)) {
900         LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
901                           << '\n');
902         if (CurAST)
903           CurAST->copyValue(&I, C);
904         // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
905         I.replaceAllUsesWith(C);
906         if (isInstructionTriviallyDead(&I, TLI))
907           eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
908         Changed = true;
909         continue;
910       }
911 
912       // Try hoisting the instruction out to the preheader.  We can only do
913       // this if all of the operands of the instruction are loop invariant and
914       // if it is safe to hoist the instruction. We also check block frequency
915       // to make sure instruction only gets hoisted into colder blocks.
916       // TODO: It may be safe to hoist if we are hoisting to a conditional block
917       // and we have accurately duplicated the control flow from the loop header
918       // to that block.
919       if (CurLoop->hasLoopInvariantOperands(&I) &&
920           canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
921                              ORE) &&
922           worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
923           isSafeToExecuteUnconditionally(
924               I, DT, CurLoop, SafetyInfo, ORE,
925               CurLoop->getLoopPreheader()->getTerminator())) {
926         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
927               MSSAU, SE, ORE);
928         HoistedInstructions.push_back(&I);
929         Changed = true;
930         continue;
931       }
932 
933       // Attempt to remove floating point division out of the loop by
934       // converting it to a reciprocal multiplication.
935       if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
936           CurLoop->isLoopInvariant(I.getOperand(1))) {
937         auto Divisor = I.getOperand(1);
938         auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
939         auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
940         ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
941         SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
942         ReciprocalDivisor->insertBefore(&I);
943 
944         auto Product =
945             BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
946         Product->setFastMathFlags(I.getFastMathFlags());
947         SafetyInfo->insertInstructionTo(Product, I.getParent());
948         Product->insertAfter(&I);
949         I.replaceAllUsesWith(Product);
950         eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
951 
952         hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
953               SafetyInfo, MSSAU, SE, ORE);
954         HoistedInstructions.push_back(ReciprocalDivisor);
955         Changed = true;
956         continue;
957       }
958 
959       auto IsInvariantStart = [&](Instruction &I) {
960         using namespace PatternMatch;
961         return I.use_empty() &&
962                match(&I, m_Intrinsic<Intrinsic::invariant_start>());
963       };
964       auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
965         return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
966                SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
967       };
968       if ((IsInvariantStart(I) || isGuard(&I)) &&
969           CurLoop->hasLoopInvariantOperands(&I) &&
970           MustExecuteWithoutWritesBefore(I)) {
971         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
972               MSSAU, SE, ORE);
973         HoistedInstructions.push_back(&I);
974         Changed = true;
975         continue;
976       }
977 
978       if (PHINode *PN = dyn_cast<PHINode>(&I)) {
979         if (CFH.canHoistPHI(PN)) {
980           // Redirect incoming blocks first to ensure that we create hoisted
981           // versions of those blocks before we hoist the phi.
982           for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
983             PN->setIncomingBlock(
984                 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
985           hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
986                 MSSAU, SE, ORE);
987           assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
988           Changed = true;
989           continue;
990         }
991       }
992 
993       // Remember possibly hoistable branches so we can actually hoist them
994       // later if needed.
995       if (BranchInst *BI = dyn_cast<BranchInst>(&I))
996         CFH.registerPossiblyHoistableBranch(BI);
997     }
998   }
999 
1000   // If we hoisted instructions to a conditional block they may not dominate
1001   // their uses that weren't hoisted (such as phis where some operands are not
1002   // loop invariant). If so make them unconditional by moving them to their
1003   // immediate dominator. We iterate through the instructions in reverse order
1004   // which ensures that when we rehoist an instruction we rehoist its operands,
1005   // and also keep track of where in the block we are rehoisting to to make sure
1006   // that we rehoist instructions before the instructions that use them.
1007   Instruction *HoistPoint = nullptr;
1008   if (ControlFlowHoisting) {
1009     for (Instruction *I : reverse(HoistedInstructions)) {
1010       if (!llvm::all_of(I->uses(),
1011                         [&](Use &U) { return DT->dominates(I, U); })) {
1012         BasicBlock *Dominator =
1013             DT->getNode(I->getParent())->getIDom()->getBlock();
1014         if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1015           if (HoistPoint)
1016             assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1017                    "New hoist point expected to dominate old hoist point");
1018           HoistPoint = Dominator->getTerminator();
1019         }
1020         LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1021                           << HoistPoint->getParent()->getNameOrAsOperand()
1022                           << ": " << *I << "\n");
1023         moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1024         HoistPoint = I;
1025         Changed = true;
1026       }
1027     }
1028   }
1029   if (MSSAU && VerifyMemorySSA)
1030     MSSAU->getMemorySSA()->verifyMemorySSA();
1031 
1032     // Now that we've finished hoisting make sure that LI and DT are still
1033     // valid.
1034 #ifdef EXPENSIVE_CHECKS
1035   if (Changed) {
1036     assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1037            "Dominator tree verification failed");
1038     LI->verify(*DT);
1039   }
1040 #endif
1041 
1042   return Changed;
1043 }
1044 
1045 // Return true if LI is invariant within scope of the loop. LI is invariant if
1046 // CurLoop is dominated by an invariant.start representing the same memory
1047 // location and size as the memory location LI loads from, and also the
1048 // invariant.start has no uses.
1049 static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1050                                   Loop *CurLoop) {
1051   Value *Addr = LI->getOperand(0);
1052   const DataLayout &DL = LI->getModule()->getDataLayout();
1053   const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1054 
1055   // It is not currently possible for clang to generate an invariant.start
1056   // intrinsic with scalable vector types because we don't support thread local
1057   // sizeless types and we don't permit sizeless types in structs or classes.
1058   // Furthermore, even if support is added for this in future the intrinsic
1059   // itself is defined to have a size of -1 for variable sized objects. This
1060   // makes it impossible to verify if the intrinsic envelops our region of
1061   // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1062   // types would have a -1 parameter, but the former is clearly double the size
1063   // of the latter.
1064   if (LocSizeInBits.isScalable())
1065     return false;
1066 
1067   // if the type is i8 addrspace(x)*, we know this is the type of
1068   // llvm.invariant.start operand
1069   auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1070                                      LI->getPointerAddressSpace());
1071   unsigned BitcastsVisited = 0;
1072   // Look through bitcasts until we reach the i8* type (this is invariant.start
1073   // operand type).
1074   while (Addr->getType() != PtrInt8Ty) {
1075     auto *BC = dyn_cast<BitCastInst>(Addr);
1076     // Avoid traversing high number of bitcast uses.
1077     if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1078       return false;
1079     Addr = BC->getOperand(0);
1080   }
1081 
1082   unsigned UsesVisited = 0;
1083   // Traverse all uses of the load operand value, to see if invariant.start is
1084   // one of the uses, and whether it dominates the load instruction.
1085   for (auto *U : Addr->users()) {
1086     // Avoid traversing for Load operand with high number of users.
1087     if (++UsesVisited > MaxNumUsesTraversed)
1088       return false;
1089     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1090     // If there are escaping uses of invariant.start instruction, the load maybe
1091     // non-invariant.
1092     if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1093         !II->use_empty())
1094       continue;
1095     ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1096     // The intrinsic supports having a -1 argument for variable sized objects
1097     // so we should check for that here.
1098     if (InvariantSize->isNegative())
1099       continue;
1100     uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1101     // Confirm the invariant.start location size contains the load operand size
1102     // in bits. Also, the invariant.start should dominate the load, and we
1103     // should not hoist the load out of a loop that contains this dominating
1104     // invariant.start.
1105     if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1106         DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1107       return true;
1108   }
1109 
1110   return false;
1111 }
1112 
1113 namespace {
1114 /// Return true if-and-only-if we know how to (mechanically) both hoist and
1115 /// sink a given instruction out of a loop.  Does not address legality
1116 /// concerns such as aliasing or speculation safety.
1117 bool isHoistableAndSinkableInst(Instruction &I) {
1118   // Only these instructions are hoistable/sinkable.
1119   return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
1120           isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1121           isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1122           isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1123           isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1124           isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1125           isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1126 }
1127 /// Return true if all of the alias sets within this AST are known not to
1128 /// contain a Mod, or if MSSA knows thare are no MemoryDefs in the loop.
1129 bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1130                 const Loop *L) {
1131   if (CurAST) {
1132     for (AliasSet &AS : *CurAST) {
1133       if (!AS.isForwardingAliasSet() && AS.isMod()) {
1134         return false;
1135       }
1136     }
1137     return true;
1138   } else { /*MSSAU*/
1139     for (auto *BB : L->getBlocks())
1140       if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1141         return false;
1142     return true;
1143   }
1144 }
1145 
1146 /// Return true if I is the only Instruction with a MemoryAccess in L.
1147 bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1148                         const MemorySSAUpdater *MSSAU) {
1149   for (auto *BB : L->getBlocks())
1150     if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1151       int NotAPhi = 0;
1152       for (const auto &Acc : *Accs) {
1153         if (isa<MemoryPhi>(&Acc))
1154           continue;
1155         const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1156         if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1157           return false;
1158       }
1159     }
1160   return true;
1161 }
1162 }
1163 
1164 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1165                               Loop *CurLoop, AliasSetTracker *CurAST,
1166                               MemorySSAUpdater *MSSAU,
1167                               bool TargetExecutesOncePerLoop,
1168                               SinkAndHoistLICMFlags *Flags,
1169                               OptimizationRemarkEmitter *ORE) {
1170   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
1171          "Either AliasSetTracker or MemorySSA should be initialized.");
1172 
1173   // If we don't understand the instruction, bail early.
1174   if (!isHoistableAndSinkableInst(I))
1175     return false;
1176 
1177   MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
1178   if (MSSA)
1179     assert(Flags != nullptr && "Flags cannot be null.");
1180 
1181   // Loads have extra constraints we have to verify before we can hoist them.
1182   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1183     if (!LI->isUnordered())
1184       return false; // Don't sink/hoist volatile or ordered atomic loads!
1185 
1186     // Loads from constant memory are always safe to move, even if they end up
1187     // in the same alias set as something that ends up being modified.
1188     if (AA->pointsToConstantMemory(LI->getOperand(0)))
1189       return true;
1190     if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1191       return true;
1192 
1193     if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1194       return false; // Don't risk duplicating unordered loads
1195 
1196     // This checks for an invariant.start dominating the load.
1197     if (isLoadInvariantInLoop(LI, DT, CurLoop))
1198       return true;
1199 
1200     bool Invalidated;
1201     if (CurAST)
1202       Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1203                                              CurLoop, AA);
1204     else
1205       Invalidated = pointerInvalidatedByLoopWithMSSA(
1206           MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
1207     // Check loop-invariant address because this may also be a sinkable load
1208     // whose address is not necessarily loop-invariant.
1209     if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1210       ORE->emit([&]() {
1211         return OptimizationRemarkMissed(
1212                    DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1213                << "failed to move load with loop-invariant address "
1214                   "because the loop may invalidate its value";
1215       });
1216 
1217     return !Invalidated;
1218   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1219     // Don't sink or hoist dbg info; it's legal, but not useful.
1220     if (isa<DbgInfoIntrinsic>(I))
1221       return false;
1222 
1223     // Don't sink calls which can throw.
1224     if (CI->mayThrow())
1225       return false;
1226 
1227     // Convergent attribute has been used on operations that involve
1228     // inter-thread communication which results are implicitly affected by the
1229     // enclosing control flows. It is not safe to hoist or sink such operations
1230     // across control flow.
1231     if (CI->isConvergent())
1232       return false;
1233 
1234     using namespace PatternMatch;
1235     if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1236       // Assumes don't actually alias anything or throw
1237       return true;
1238 
1239     if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1240       // Widenable conditions don't actually alias anything or throw
1241       return true;
1242 
1243     // Handle simple cases by querying alias analysis.
1244     FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1245     if (Behavior == FMRB_DoesNotAccessMemory)
1246       return true;
1247     if (AAResults::onlyReadsMemory(Behavior)) {
1248       // A readonly argmemonly function only reads from memory pointed to by
1249       // it's arguments with arbitrary offsets.  If we can prove there are no
1250       // writes to this memory in the loop, we can hoist or sink.
1251       if (AAResults::onlyAccessesArgPointees(Behavior)) {
1252         // TODO: expand to writeable arguments
1253         for (Value *Op : CI->arg_operands())
1254           if (Op->getType()->isPointerTy()) {
1255             bool Invalidated;
1256             if (CurAST)
1257               Invalidated = pointerInvalidatedByLoop(
1258                   MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1259             else
1260               Invalidated = pointerInvalidatedByLoopWithMSSA(
1261                   MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1262                   *Flags);
1263             if (Invalidated)
1264               return false;
1265           }
1266         return true;
1267       }
1268 
1269       // If this call only reads from memory and there are no writes to memory
1270       // in the loop, we can hoist or sink the call as appropriate.
1271       if (isReadOnly(CurAST, MSSAU, CurLoop))
1272         return true;
1273     }
1274 
1275     // FIXME: This should use mod/ref information to see if we can hoist or
1276     // sink the call.
1277 
1278     return false;
1279   } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1280     // Fences alias (most) everything to provide ordering.  For the moment,
1281     // just give up if there are any other memory operations in the loop.
1282     if (CurAST) {
1283       auto Begin = CurAST->begin();
1284       assert(Begin != CurAST->end() && "must contain FI");
1285       if (std::next(Begin) != CurAST->end())
1286         // constant memory for instance, TODO: handle better
1287         return false;
1288       auto *UniqueI = Begin->getUniqueInstruction();
1289       if (!UniqueI)
1290         // other memory op, give up
1291         return false;
1292       (void)FI; // suppress unused variable warning
1293       assert(UniqueI == FI && "AS must contain FI");
1294       return true;
1295     } else // MSSAU
1296       return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1297   } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1298     if (!SI->isUnordered())
1299       return false; // Don't sink/hoist volatile or ordered atomic store!
1300 
1301     // We can only hoist a store that we can prove writes a value which is not
1302     // read or overwritten within the loop.  For those cases, we fallback to
1303     // load store promotion instead.  TODO: We can extend this to cases where
1304     // there is exactly one write to the location and that write dominates an
1305     // arbitrary number of reads in the loop.
1306     if (CurAST) {
1307       auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1308 
1309       if (AS.isRef() || !AS.isMustAlias())
1310         // Quick exit test, handled by the full path below as well.
1311         return false;
1312       auto *UniqueI = AS.getUniqueInstruction();
1313       if (!UniqueI)
1314         // other memory op, give up
1315         return false;
1316       assert(UniqueI == SI && "AS must contain SI");
1317       return true;
1318     } else { // MSSAU
1319       if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1320         return true;
1321       // If there are more accesses than the Promotion cap or no "quota" to
1322       // check clobber, then give up as we're not walking a list that long.
1323       if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1324         return false;
1325       // If there are interfering Uses (i.e. their defining access is in the
1326       // loop), or ordered loads (stored as Defs!), don't move this store.
1327       // Could do better here, but this is conservatively correct.
1328       // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1329       // moving accesses. Can also extend to dominating uses.
1330       auto *SIMD = MSSA->getMemoryAccess(SI);
1331       for (auto *BB : CurLoop->getBlocks())
1332         if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1333           for (const auto &MA : *Accesses)
1334             if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1335               auto *MD = MU->getDefiningAccess();
1336               if (!MSSA->isLiveOnEntryDef(MD) &&
1337                   CurLoop->contains(MD->getBlock()))
1338                 return false;
1339               // Disable hoisting past potentially interfering loads. Optimized
1340               // Uses may point to an access outside the loop, as getClobbering
1341               // checks the previous iteration when walking the backedge.
1342               // FIXME: More precise: no Uses that alias SI.
1343               if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1344                 return false;
1345             } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1346               if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1347                 (void)LI; // Silence warning.
1348                 assert(!LI->isUnordered() && "Expected unordered load");
1349                 return false;
1350               }
1351               // Any call, while it may not be clobbering SI, it may be a use.
1352               if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1353                 // Check if the call may read from the memory locattion written
1354                 // to by SI. Check CI's attributes and arguments; the number of
1355                 // such checks performed is limited above by NoOfMemAccTooLarge.
1356                 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1357                 if (isModOrRefSet(MRI))
1358                   return false;
1359               }
1360             }
1361         }
1362       auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1363       Flags->incrementClobberingCalls();
1364       // If there are no clobbering Defs in the loop, store is safe to hoist.
1365       return MSSA->isLiveOnEntryDef(Source) ||
1366              !CurLoop->contains(Source->getBlock());
1367     }
1368   }
1369 
1370   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1371 
1372   // We've established mechanical ability and aliasing, it's up to the caller
1373   // to check fault safety
1374   return true;
1375 }
1376 
1377 /// Returns true if a PHINode is a trivially replaceable with an
1378 /// Instruction.
1379 /// This is true when all incoming values are that instruction.
1380 /// This pattern occurs most often with LCSSA PHI nodes.
1381 ///
1382 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1383   for (const Value *IncValue : PN.incoming_values())
1384     if (IncValue != &I)
1385       return false;
1386 
1387   return true;
1388 }
1389 
1390 /// Return true if the instruction is free in the loop.
1391 static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1392                          const TargetTransformInfo *TTI) {
1393 
1394   if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1395     if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1396         TargetTransformInfo::TCC_Free)
1397       return false;
1398     // For a GEP, we cannot simply use getUserCost because currently it
1399     // optimistically assume that a GEP will fold into addressing mode
1400     // regardless of its users.
1401     const BasicBlock *BB = GEP->getParent();
1402     for (const User *U : GEP->users()) {
1403       const Instruction *UI = cast<Instruction>(U);
1404       if (CurLoop->contains(UI) &&
1405           (BB != UI->getParent() ||
1406            (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1407         return false;
1408     }
1409     return true;
1410   } else
1411     return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1412            TargetTransformInfo::TCC_Free;
1413 }
1414 
1415 /// Return true if the only users of this instruction are outside of
1416 /// the loop. If this is true, we can sink the instruction to the exit
1417 /// blocks of the loop.
1418 ///
1419 /// We also return true if the instruction could be folded away in lowering.
1420 /// (e.g.,  a GEP can be folded into a load as an addressing mode in the loop).
1421 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1422                                   const LoopSafetyInfo *SafetyInfo,
1423                                   TargetTransformInfo *TTI, bool &FreeInLoop) {
1424   const auto &BlockColors = SafetyInfo->getBlockColors();
1425   bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1426   for (const User *U : I.users()) {
1427     const Instruction *UI = cast<Instruction>(U);
1428     if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1429       const BasicBlock *BB = PN->getParent();
1430       // We cannot sink uses in catchswitches.
1431       if (isa<CatchSwitchInst>(BB->getTerminator()))
1432         return false;
1433 
1434       // We need to sink a callsite to a unique funclet.  Avoid sinking if the
1435       // phi use is too muddled.
1436       if (isa<CallInst>(I))
1437         if (!BlockColors.empty() &&
1438             BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1439           return false;
1440     }
1441 
1442     if (CurLoop->contains(UI)) {
1443       if (IsFree) {
1444         FreeInLoop = true;
1445         continue;
1446       }
1447       return false;
1448     }
1449   }
1450   return true;
1451 }
1452 
1453 static Instruction *cloneInstructionInExitBlock(
1454     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1455     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1456   Instruction *New;
1457   if (auto *CI = dyn_cast<CallInst>(&I)) {
1458     const auto &BlockColors = SafetyInfo->getBlockColors();
1459 
1460     // Sinking call-sites need to be handled differently from other
1461     // instructions.  The cloned call-site needs a funclet bundle operand
1462     // appropriate for its location in the CFG.
1463     SmallVector<OperandBundleDef, 1> OpBundles;
1464     for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1465          BundleIdx != BundleEnd; ++BundleIdx) {
1466       OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1467       if (Bundle.getTagID() == LLVMContext::OB_funclet)
1468         continue;
1469 
1470       OpBundles.emplace_back(Bundle);
1471     }
1472 
1473     if (!BlockColors.empty()) {
1474       const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1475       assert(CV.size() == 1 && "non-unique color for exit block!");
1476       BasicBlock *BBColor = CV.front();
1477       Instruction *EHPad = BBColor->getFirstNonPHI();
1478       if (EHPad->isEHPad())
1479         OpBundles.emplace_back("funclet", EHPad);
1480     }
1481 
1482     New = CallInst::Create(CI, OpBundles);
1483   } else {
1484     New = I.clone();
1485   }
1486 
1487   ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1488   if (!I.getName().empty())
1489     New->setName(I.getName() + ".le");
1490 
1491   if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1492     // Create a new MemoryAccess and let MemorySSA set its defining access.
1493     MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1494         New, nullptr, New->getParent(), MemorySSA::Beginning);
1495     if (NewMemAcc) {
1496       if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1497         MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1498       else {
1499         auto *MemUse = cast<MemoryUse>(NewMemAcc);
1500         MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1501       }
1502     }
1503   }
1504 
1505   // Build LCSSA PHI nodes for any in-loop operands. Note that this is
1506   // particularly cheap because we can rip off the PHI node that we're
1507   // replacing for the number and blocks of the predecessors.
1508   // OPT: If this shows up in a profile, we can instead finish sinking all
1509   // invariant instructions, and then walk their operands to re-establish
1510   // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1511   // sinking bottom-up.
1512   for (Use &Op : New->operands())
1513     if (Instruction *OInst = dyn_cast<Instruction>(Op))
1514       if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
1515         if (!OLoop->contains(&PN)) {
1516           PHINode *OpPN =
1517               PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1518                               OInst->getName() + ".lcssa", &ExitBlock.front());
1519           for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1520             OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1521           Op = OpPN;
1522         }
1523   return New;
1524 }
1525 
1526 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1527                              AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1528   if (AST)
1529     AST->deleteValue(&I);
1530   if (MSSAU)
1531     MSSAU->removeMemoryAccess(&I);
1532   SafetyInfo.removeInstruction(&I);
1533   I.eraseFromParent();
1534 }
1535 
1536 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1537                                   ICFLoopSafetyInfo &SafetyInfo,
1538                                   MemorySSAUpdater *MSSAU,
1539                                   ScalarEvolution *SE) {
1540   SafetyInfo.removeInstruction(&I);
1541   SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1542   I.moveBefore(&Dest);
1543   if (MSSAU)
1544     if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1545             MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1546       MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1547                          MemorySSA::BeforeTerminator);
1548   if (SE)
1549     SE->forgetValue(&I);
1550 }
1551 
1552 static Instruction *sinkThroughTriviallyReplaceablePHI(
1553     PHINode *TPN, Instruction *I, LoopInfo *LI,
1554     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1555     const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1556     MemorySSAUpdater *MSSAU) {
1557   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1558          "Expect only trivially replaceable PHI");
1559   BasicBlock *ExitBlock = TPN->getParent();
1560   Instruction *New;
1561   auto It = SunkCopies.find(ExitBlock);
1562   if (It != SunkCopies.end())
1563     New = It->second;
1564   else
1565     New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1566         *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1567   return New;
1568 }
1569 
1570 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1571   BasicBlock *BB = PN->getParent();
1572   if (!BB->canSplitPredecessors())
1573     return false;
1574   // It's not impossible to split EHPad blocks, but if BlockColors already exist
1575   // it require updating BlockColors for all offspring blocks accordingly. By
1576   // skipping such corner case, we can make updating BlockColors after splitting
1577   // predecessor fairly simple.
1578   if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1579     return false;
1580   for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1581     BasicBlock *BBPred = *PI;
1582     if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1583         isa<CallBrInst>(BBPred->getTerminator()))
1584       return false;
1585   }
1586   return true;
1587 }
1588 
1589 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1590                                         LoopInfo *LI, const Loop *CurLoop,
1591                                         LoopSafetyInfo *SafetyInfo,
1592                                         MemorySSAUpdater *MSSAU) {
1593 #ifndef NDEBUG
1594   SmallVector<BasicBlock *, 32> ExitBlocks;
1595   CurLoop->getUniqueExitBlocks(ExitBlocks);
1596   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1597                                              ExitBlocks.end());
1598 #endif
1599   BasicBlock *ExitBB = PN->getParent();
1600   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1601 
1602   // Split predecessors of the loop exit to make instructions in the loop are
1603   // exposed to exit blocks through trivially replaceable PHIs while keeping the
1604   // loop in the canonical form where each predecessor of each exit block should
1605   // be contained within the loop. For example, this will convert the loop below
1606   // from
1607   //
1608   // LB1:
1609   //   %v1 =
1610   //   br %LE, %LB2
1611   // LB2:
1612   //   %v2 =
1613   //   br %LE, %LB1
1614   // LE:
1615   //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1616   //
1617   // to
1618   //
1619   // LB1:
1620   //   %v1 =
1621   //   br %LE.split, %LB2
1622   // LB2:
1623   //   %v2 =
1624   //   br %LE.split2, %LB1
1625   // LE.split:
1626   //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
1627   //   br %LE
1628   // LE.split2:
1629   //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
1630   //   br %LE
1631   // LE:
1632   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1633   //
1634   const auto &BlockColors = SafetyInfo->getBlockColors();
1635   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1636   while (!PredBBs.empty()) {
1637     BasicBlock *PredBB = *PredBBs.begin();
1638     assert(CurLoop->contains(PredBB) &&
1639            "Expect all predecessors are in the loop");
1640     if (PN->getBasicBlockIndex(PredBB) >= 0) {
1641       BasicBlock *NewPred = SplitBlockPredecessors(
1642           ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1643       // Since we do not allow splitting EH-block with BlockColors in
1644       // canSplitPredecessors(), we can simply assign predecessor's color to
1645       // the new block.
1646       if (!BlockColors.empty())
1647         // Grab a reference to the ColorVector to be inserted before getting the
1648         // reference to the vector we are copying because inserting the new
1649         // element in BlockColors might cause the map to be reallocated.
1650         SafetyInfo->copyColors(NewPred, PredBB);
1651     }
1652     PredBBs.remove(PredBB);
1653   }
1654 }
1655 
1656 /// When an instruction is found to only be used outside of the loop, this
1657 /// function moves it to the exit blocks and patches up SSA form as needed.
1658 /// This method is guaranteed to remove the original instruction from its
1659 /// position, and may either delete it or move it to outside of the loop.
1660 ///
1661 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1662                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
1663                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1664                  OptimizationRemarkEmitter *ORE) {
1665   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1666   ORE->emit([&]() {
1667     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1668            << "sinking " << ore::NV("Inst", &I);
1669   });
1670   bool Changed = false;
1671   if (isa<LoadInst>(I))
1672     ++NumMovedLoads;
1673   else if (isa<CallInst>(I))
1674     ++NumMovedCalls;
1675   ++NumSunk;
1676 
1677   // Iterate over users to be ready for actual sinking. Replace users via
1678   // unreachable blocks with undef and make all user PHIs trivially replaceable.
1679   SmallPtrSet<Instruction *, 8> VisitedUsers;
1680   for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1681     auto *User = cast<Instruction>(*UI);
1682     Use &U = UI.getUse();
1683     ++UI;
1684 
1685     if (VisitedUsers.count(User) || CurLoop->contains(User))
1686       continue;
1687 
1688     if (!DT->isReachableFromEntry(User->getParent())) {
1689       U = UndefValue::get(I.getType());
1690       Changed = true;
1691       continue;
1692     }
1693 
1694     // The user must be a PHI node.
1695     PHINode *PN = cast<PHINode>(User);
1696 
1697     // Surprisingly, instructions can be used outside of loops without any
1698     // exits.  This can only happen in PHI nodes if the incoming block is
1699     // unreachable.
1700     BasicBlock *BB = PN->getIncomingBlock(U);
1701     if (!DT->isReachableFromEntry(BB)) {
1702       U = UndefValue::get(I.getType());
1703       Changed = true;
1704       continue;
1705     }
1706 
1707     VisitedUsers.insert(PN);
1708     if (isTriviallyReplaceablePHI(*PN, I))
1709       continue;
1710 
1711     if (!canSplitPredecessors(PN, SafetyInfo))
1712       return Changed;
1713 
1714     // Split predecessors of the PHI so that we can make users trivially
1715     // replaceable.
1716     splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1717 
1718     // Should rebuild the iterators, as they may be invalidated by
1719     // splitPredecessorsOfLoopExit().
1720     UI = I.user_begin();
1721     UE = I.user_end();
1722   }
1723 
1724   if (VisitedUsers.empty())
1725     return Changed;
1726 
1727 #ifndef NDEBUG
1728   SmallVector<BasicBlock *, 32> ExitBlocks;
1729   CurLoop->getUniqueExitBlocks(ExitBlocks);
1730   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1731                                              ExitBlocks.end());
1732 #endif
1733 
1734   // Clones of this instruction. Don't create more than one per exit block!
1735   SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1736 
1737   // If this instruction is only used outside of the loop, then all users are
1738   // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1739   // the instruction.
1740   // First check if I is worth sinking for all uses. Sink only when it is worth
1741   // across all uses.
1742   SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1743   SmallVector<PHINode *, 8> ExitPNs;
1744   for (auto *UI : Users) {
1745     auto *User = cast<Instruction>(UI);
1746 
1747     if (CurLoop->contains(User))
1748       continue;
1749 
1750     PHINode *PN = cast<PHINode>(User);
1751     assert(ExitBlockSet.count(PN->getParent()) &&
1752            "The LCSSA PHI is not in an exit block!");
1753     if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1754       return Changed;
1755     }
1756 
1757     ExitPNs.push_back(PN);
1758   }
1759 
1760   for (auto *PN : ExitPNs) {
1761 
1762     // The PHI must be trivially replaceable.
1763     Instruction *New = sinkThroughTriviallyReplaceablePHI(
1764         PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1765     PN->replaceAllUsesWith(New);
1766     eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1767     Changed = true;
1768   }
1769   return Changed;
1770 }
1771 
1772 /// When an instruction is found to only use loop invariant operands that
1773 /// is safe to hoist, this instruction is called to do the dirty work.
1774 ///
1775 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1776                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1777                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1778                   OptimizationRemarkEmitter *ORE) {
1779   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1780                     << I << "\n");
1781   ORE->emit([&]() {
1782     return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1783                                                          << ore::NV("Inst", &I);
1784   });
1785 
1786   // Metadata can be dependent on conditions we are hoisting above.
1787   // Conservatively strip all metadata on the instruction unless we were
1788   // guaranteed to execute I if we entered the loop, in which case the metadata
1789   // is valid in the loop preheader.
1790   if (I.hasMetadataOtherThanDebugLoc() &&
1791       // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1792       // time in isGuaranteedToExecute if we don't actually have anything to
1793       // drop.  It is a compile time optimization, not required for correctness.
1794       !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1795     I.dropUnknownNonDebugMetadata();
1796 
1797   if (isa<PHINode>(I))
1798     // Move the new node to the end of the phi list in the destination block.
1799     moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1800   else
1801     // Move the new node to the destination block, before its terminator.
1802     moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1803 
1804   I.updateLocationAfterHoist();
1805 
1806   if (isa<LoadInst>(I))
1807     ++NumMovedLoads;
1808   else if (isa<CallInst>(I))
1809     ++NumMovedCalls;
1810   ++NumHoisted;
1811 }
1812 
1813 /// Only sink or hoist an instruction if it is not a trapping instruction,
1814 /// or if the instruction is known not to trap when moved to the preheader.
1815 /// or if it is a trapping instruction and is guaranteed to execute.
1816 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1817                                            const DominatorTree *DT,
1818                                            const Loop *CurLoop,
1819                                            const LoopSafetyInfo *SafetyInfo,
1820                                            OptimizationRemarkEmitter *ORE,
1821                                            const Instruction *CtxI) {
1822   if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
1823     return true;
1824 
1825   bool GuaranteedToExecute =
1826       SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1827 
1828   if (!GuaranteedToExecute) {
1829     auto *LI = dyn_cast<LoadInst>(&Inst);
1830     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1831       ORE->emit([&]() {
1832         return OptimizationRemarkMissed(
1833                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1834                << "failed to hoist load with loop-invariant address "
1835                   "because load is conditionally executed";
1836       });
1837   }
1838 
1839   return GuaranteedToExecute;
1840 }
1841 
1842 namespace {
1843 class LoopPromoter : public LoadAndStorePromoter {
1844   Value *SomePtr; // Designated pointer to store to.
1845   const SmallSetVector<Value *, 8> &PointerMustAliases;
1846   SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1847   SmallVectorImpl<Instruction *> &LoopInsertPts;
1848   SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1849   PredIteratorCache &PredCache;
1850   AliasSetTracker *AST;
1851   MemorySSAUpdater *MSSAU;
1852   LoopInfo &LI;
1853   DebugLoc DL;
1854   int Alignment;
1855   bool UnorderedAtomic;
1856   AAMDNodes AATags;
1857   ICFLoopSafetyInfo &SafetyInfo;
1858 
1859   Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1860     if (Instruction *I = dyn_cast<Instruction>(V))
1861       if (Loop *L = LI.getLoopFor(I->getParent()))
1862         if (!L->contains(BB)) {
1863           // We need to create an LCSSA PHI node for the incoming value and
1864           // store that.
1865           PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1866                                         I->getName() + ".lcssa", &BB->front());
1867           for (BasicBlock *Pred : PredCache.get(BB))
1868             PN->addIncoming(I, Pred);
1869           return PN;
1870         }
1871     return V;
1872   }
1873 
1874 public:
1875   LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1876                const SmallSetVector<Value *, 8> &PMA,
1877                SmallVectorImpl<BasicBlock *> &LEB,
1878                SmallVectorImpl<Instruction *> &LIP,
1879                SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1880                AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1881                DebugLoc dl, int alignment, bool UnorderedAtomic,
1882                const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1883       : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1884         LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1885         PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1886         Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1887         SafetyInfo(SafetyInfo) {}
1888 
1889   bool isInstInList(Instruction *I,
1890                     const SmallVectorImpl<Instruction *> &) const override {
1891     Value *Ptr;
1892     if (LoadInst *LI = dyn_cast<LoadInst>(I))
1893       Ptr = LI->getOperand(0);
1894     else
1895       Ptr = cast<StoreInst>(I)->getPointerOperand();
1896     return PointerMustAliases.count(Ptr);
1897   }
1898 
1899   void doExtraRewritesBeforeFinalDeletion() override {
1900     // Insert stores after in the loop exit blocks.  Each exit block gets a
1901     // store of the live-out values that feed them.  Since we've already told
1902     // the SSA updater about the defs in the loop and the preheader
1903     // definition, it is all set and we can start using it.
1904     for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1905       BasicBlock *ExitBlock = LoopExitBlocks[i];
1906       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1907       LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1908       Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1909       Instruction *InsertPos = LoopInsertPts[i];
1910       StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1911       if (UnorderedAtomic)
1912         NewSI->setOrdering(AtomicOrdering::Unordered);
1913       NewSI->setAlignment(Align(Alignment));
1914       NewSI->setDebugLoc(DL);
1915       if (AATags)
1916         NewSI->setAAMetadata(AATags);
1917 
1918       if (MSSAU) {
1919         MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1920         MemoryAccess *NewMemAcc;
1921         if (!MSSAInsertPoint) {
1922           NewMemAcc = MSSAU->createMemoryAccessInBB(
1923               NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1924         } else {
1925           NewMemAcc =
1926               MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1927         }
1928         MSSAInsertPts[i] = NewMemAcc;
1929         MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1930         // FIXME: true for safety, false may still be correct.
1931       }
1932     }
1933   }
1934 
1935   void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1936     // Update alias analysis.
1937     if (AST)
1938       AST->copyValue(LI, V);
1939   }
1940   void instructionDeleted(Instruction *I) const override {
1941     SafetyInfo.removeInstruction(I);
1942     if (AST)
1943       AST->deleteValue(I);
1944     if (MSSAU)
1945       MSSAU->removeMemoryAccess(I);
1946   }
1947 };
1948 
1949 
1950 /// Return true iff we can prove that a caller of this function can not inspect
1951 /// the contents of the provided object in a well defined program.
1952 bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
1953   if (isa<AllocaInst>(Object))
1954     // Since the alloca goes out of scope, we know the caller can't retain a
1955     // reference to it and be well defined.  Thus, we don't need to check for
1956     // capture.
1957     return true;
1958 
1959   // For all other objects we need to know that the caller can't possibly
1960   // have gotten a reference to the object.  There are two components of
1961   // that:
1962   //   1) Object can't be escaped by this function.  This is what
1963   //      PointerMayBeCaptured checks.
1964   //   2) Object can't have been captured at definition site.  For this, we
1965   //      need to know the return value is noalias.  At the moment, we use a
1966   //      weaker condition and handle only AllocLikeFunctions (which are
1967   //      known to be noalias).  TODO
1968   return isAllocLikeFn(Object, TLI) &&
1969     !PointerMayBeCaptured(Object, true, true);
1970 }
1971 
1972 } // namespace
1973 
1974 /// Try to promote memory values to scalars by sinking stores out of the
1975 /// loop and moving loads to before the loop.  We do this by looping over
1976 /// the stores in the loop, looking for stores to Must pointers which are
1977 /// loop invariant.
1978 ///
1979 bool llvm::promoteLoopAccessesToScalars(
1980     const SmallSetVector<Value *, 8> &PointerMustAliases,
1981     SmallVectorImpl<BasicBlock *> &ExitBlocks,
1982     SmallVectorImpl<Instruction *> &InsertPts,
1983     SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1984     LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1985     Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
1986     ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
1987   // Verify inputs.
1988   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1989          SafetyInfo != nullptr &&
1990          "Unexpected Input to promoteLoopAccessesToScalars");
1991 
1992   Value *SomePtr = *PointerMustAliases.begin();
1993   BasicBlock *Preheader = CurLoop->getLoopPreheader();
1994 
1995   // It is not safe to promote a load/store from the loop if the load/store is
1996   // conditional.  For example, turning:
1997   //
1998   //    for () { if (c) *P += 1; }
1999   //
2000   // into:
2001   //
2002   //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
2003   //
2004   // is not safe, because *P may only be valid to access if 'c' is true.
2005   //
2006   // The safety property divides into two parts:
2007   // p1) The memory may not be dereferenceable on entry to the loop.  In this
2008   //    case, we can't insert the required load in the preheader.
2009   // p2) The memory model does not allow us to insert a store along any dynamic
2010   //    path which did not originally have one.
2011   //
2012   // If at least one store is guaranteed to execute, both properties are
2013   // satisfied, and promotion is legal.
2014   //
2015   // This, however, is not a necessary condition. Even if no store/load is
2016   // guaranteed to execute, we can still establish these properties.
2017   // We can establish (p1) by proving that hoisting the load into the preheader
2018   // is safe (i.e. proving dereferenceability on all paths through the loop). We
2019   // can use any access within the alias set to prove dereferenceability,
2020   // since they're all must alias.
2021   //
2022   // There are two ways establish (p2):
2023   // a) Prove the location is thread-local. In this case the memory model
2024   // requirement does not apply, and stores are safe to insert.
2025   // b) Prove a store dominates every exit block. In this case, if an exit
2026   // blocks is reached, the original dynamic path would have taken us through
2027   // the store, so inserting a store into the exit block is safe. Note that this
2028   // is different from the store being guaranteed to execute. For instance,
2029   // if an exception is thrown on the first iteration of the loop, the original
2030   // store is never executed, but the exit blocks are not executed either.
2031 
2032   bool DereferenceableInPH = false;
2033   bool SafeToInsertStore = false;
2034 
2035   SmallVector<Instruction *, 64> LoopUses;
2036 
2037   // We start with an alignment of one and try to find instructions that allow
2038   // us to prove better alignment.
2039   Align Alignment;
2040   // Keep track of which types of access we see
2041   bool SawUnorderedAtomic = false;
2042   bool SawNotAtomic = false;
2043   AAMDNodes AATags;
2044 
2045   const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2046 
2047   bool IsKnownThreadLocalObject = false;
2048   if (SafetyInfo->anyBlockMayThrow()) {
2049     // If a loop can throw, we have to insert a store along each unwind edge.
2050     // That said, we can't actually make the unwind edge explicit. Therefore,
2051     // we have to prove that the store is dead along the unwind edge.  We do
2052     // this by proving that the caller can't have a reference to the object
2053     // after return and thus can't possibly load from the object.
2054     Value *Object = getUnderlyingObject(SomePtr);
2055     if (!isKnownNonEscaping(Object, TLI))
2056       return false;
2057     // Subtlety: Alloca's aren't visible to callers, but *are* potentially
2058     // visible to other threads if captured and used during their lifetimes.
2059     IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2060   }
2061 
2062   // Check that all of the pointers in the alias set have the same type.  We
2063   // cannot (yet) promote a memory location that is loaded and stored in
2064   // different sizes.  While we are at it, collect alignment and AA info.
2065   for (Value *ASIV : PointerMustAliases) {
2066     // Check that all of the pointers in the alias set have the same type.  We
2067     // cannot (yet) promote a memory location that is loaded and stored in
2068     // different sizes.
2069     if (SomePtr->getType() != ASIV->getType())
2070       return false;
2071 
2072     for (User *U : ASIV->users()) {
2073       // Ignore instructions that are outside the loop.
2074       Instruction *UI = dyn_cast<Instruction>(U);
2075       if (!UI || !CurLoop->contains(UI))
2076         continue;
2077 
2078       // If there is an non-load/store instruction in the loop, we can't promote
2079       // it.
2080       if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2081         if (!Load->isUnordered())
2082           return false;
2083 
2084         SawUnorderedAtomic |= Load->isAtomic();
2085         SawNotAtomic |= !Load->isAtomic();
2086 
2087         Align InstAlignment = Load->getAlign();
2088 
2089         // Note that proving a load safe to speculate requires proving
2090         // sufficient alignment at the target location.  Proving it guaranteed
2091         // to execute does as well.  Thus we can increase our guaranteed
2092         // alignment as well.
2093         if (!DereferenceableInPH || (InstAlignment > Alignment))
2094           if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
2095                                              ORE, Preheader->getTerminator())) {
2096             DereferenceableInPH = true;
2097             Alignment = std::max(Alignment, InstAlignment);
2098           }
2099       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2100         // Stores *of* the pointer are not interesting, only stores *to* the
2101         // pointer.
2102         if (UI->getOperand(1) != ASIV)
2103           continue;
2104         if (!Store->isUnordered())
2105           return false;
2106 
2107         SawUnorderedAtomic |= Store->isAtomic();
2108         SawNotAtomic |= !Store->isAtomic();
2109 
2110         // If the store is guaranteed to execute, both properties are satisfied.
2111         // We may want to check if a store is guaranteed to execute even if we
2112         // already know that promotion is safe, since it may have higher
2113         // alignment than any other guaranteed stores, in which case we can
2114         // raise the alignment on the promoted store.
2115         Align InstAlignment = Store->getAlign();
2116 
2117         if (!DereferenceableInPH || !SafeToInsertStore ||
2118             (InstAlignment > Alignment)) {
2119           if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2120             DereferenceableInPH = true;
2121             SafeToInsertStore = true;
2122             Alignment = std::max(Alignment, InstAlignment);
2123           }
2124         }
2125 
2126         // If a store dominates all exit blocks, it is safe to sink.
2127         // As explained above, if an exit block was executed, a dominating
2128         // store must have been executed at least once, so we are not
2129         // introducing stores on paths that did not have them.
2130         // Note that this only looks at explicit exit blocks. If we ever
2131         // start sinking stores into unwind edges (see above), this will break.
2132         if (!SafeToInsertStore)
2133           SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2134             return DT->dominates(Store->getParent(), Exit);
2135           });
2136 
2137         // If the store is not guaranteed to execute, we may still get
2138         // deref info through it.
2139         if (!DereferenceableInPH) {
2140           DereferenceableInPH = isDereferenceableAndAlignedPointer(
2141               Store->getPointerOperand(), Store->getValueOperand()->getType(),
2142               Store->getAlign(), MDL, Preheader->getTerminator(), DT);
2143         }
2144       } else
2145         return false; // Not a load or store.
2146 
2147       // Merge the AA tags.
2148       if (LoopUses.empty()) {
2149         // On the first load/store, just take its AA tags.
2150         UI->getAAMetadata(AATags);
2151       } else if (AATags) {
2152         UI->getAAMetadata(AATags, /* Merge = */ true);
2153       }
2154 
2155       LoopUses.push_back(UI);
2156     }
2157   }
2158 
2159   // If we found both an unordered atomic instruction and a non-atomic memory
2160   // access, bail.  We can't blindly promote non-atomic to atomic since we
2161   // might not be able to lower the result.  We can't downgrade since that
2162   // would violate memory model.  Also, align 0 is an error for atomics.
2163   if (SawUnorderedAtomic && SawNotAtomic)
2164     return false;
2165 
2166   // If we're inserting an atomic load in the preheader, we must be able to
2167   // lower it.  We're only guaranteed to be able to lower naturally aligned
2168   // atomics.
2169   auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2170   if (SawUnorderedAtomic &&
2171       Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2172     return false;
2173 
2174   // If we couldn't prove we can hoist the load, bail.
2175   if (!DereferenceableInPH)
2176     return false;
2177 
2178   // We know we can hoist the load, but don't have a guaranteed store.
2179   // Check whether the location is thread-local. If it is, then we can insert
2180   // stores along paths which originally didn't have them without violating the
2181   // memory model.
2182   if (!SafeToInsertStore) {
2183     if (IsKnownThreadLocalObject)
2184       SafeToInsertStore = true;
2185     else {
2186       Value *Object = getUnderlyingObject(SomePtr);
2187       SafeToInsertStore =
2188           (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2189           !PointerMayBeCaptured(Object, true, true);
2190     }
2191   }
2192 
2193   // If we've still failed to prove we can sink the store, give up.
2194   if (!SafeToInsertStore)
2195     return false;
2196 
2197   // Otherwise, this is safe to promote, lets do it!
2198   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2199                     << '\n');
2200   ORE->emit([&]() {
2201     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2202                               LoopUses[0])
2203            << "Moving accesses to memory location out of the loop";
2204   });
2205   ++NumPromoted;
2206 
2207   // Look at all the loop uses, and try to merge their locations.
2208   std::vector<const DILocation *> LoopUsesLocs;
2209   for (auto U : LoopUses)
2210     LoopUsesLocs.push_back(U->getDebugLoc().get());
2211   auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2212 
2213   // We use the SSAUpdater interface to insert phi nodes as required.
2214   SmallVector<PHINode *, 16> NewPHIs;
2215   SSAUpdater SSA(&NewPHIs);
2216   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2217                         InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL,
2218                         Alignment.value(), SawUnorderedAtomic, AATags,
2219                         *SafetyInfo);
2220 
2221   // Set up the preheader to have a definition of the value.  It is the live-out
2222   // value from the preheader that uses in the loop will use.
2223   LoadInst *PreheaderLoad = new LoadInst(
2224       SomePtr->getType()->getPointerElementType(), SomePtr,
2225       SomePtr->getName() + ".promoted", Preheader->getTerminator());
2226   if (SawUnorderedAtomic)
2227     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2228   PreheaderLoad->setAlignment(Alignment);
2229   PreheaderLoad->setDebugLoc(DebugLoc());
2230   if (AATags)
2231     PreheaderLoad->setAAMetadata(AATags);
2232   SSA.AddAvailableValue(Preheader, PreheaderLoad);
2233 
2234   if (MSSAU) {
2235     MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2236         PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2237     MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2238     MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2239   }
2240 
2241   if (MSSAU && VerifyMemorySSA)
2242     MSSAU->getMemorySSA()->verifyMemorySSA();
2243   // Rewrite all the loads in the loop and remember all the definitions from
2244   // stores in the loop.
2245   Promoter.run(LoopUses);
2246 
2247   if (MSSAU && VerifyMemorySSA)
2248     MSSAU->getMemorySSA()->verifyMemorySSA();
2249   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2250   if (PreheaderLoad->use_empty())
2251     eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2252 
2253   return true;
2254 }
2255 
2256 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2257                                 function_ref<void(Instruction *)> Fn) {
2258   for (const BasicBlock *BB : L->blocks())
2259     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2260       for (const auto &Access : *Accesses)
2261         if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2262           Fn(MUD->getMemoryInst());
2263 }
2264 
2265 static SmallVector<SmallSetVector<Value *, 8>, 0>
2266 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L,
2267                            SmallVectorImpl<Instruction *> &MaybePromotable) {
2268   AliasSetTracker AST(*AA);
2269 
2270   auto IsPotentiallyPromotable = [L](const Instruction *I) {
2271     if (const auto *SI = dyn_cast<StoreInst>(I))
2272       return L->isLoopInvariant(SI->getPointerOperand());
2273     if (const auto *LI = dyn_cast<LoadInst>(I))
2274       return L->isLoopInvariant(LI->getPointerOperand());
2275     return false;
2276   };
2277 
2278   // Populate AST with potentially promotable accesses and remove them from
2279   // MaybePromotable, so they will not be checked again on the next iteration.
2280   SmallPtrSet<Value *, 16> AttemptingPromotion;
2281   llvm::erase_if(MaybePromotable, [&](Instruction *I) {
2282     if (IsPotentiallyPromotable(I)) {
2283       AttemptingPromotion.insert(I);
2284       AST.add(I);
2285       return true;
2286     }
2287     return false;
2288   });
2289 
2290   // We're only interested in must-alias sets that contain a mod.
2291   SmallVector<const AliasSet *, 8> Sets;
2292   for (AliasSet &AS : AST)
2293     if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2294       Sets.push_back(&AS);
2295 
2296   if (Sets.empty())
2297     return {}; // Nothing to promote...
2298 
2299   // Discard any sets for which there is an aliasing non-promotable access.
2300   foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2301     if (AttemptingPromotion.contains(I))
2302       return;
2303 
2304     if (Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I)) {
2305       llvm::erase_if(Sets, [&](const AliasSet *AS) {
2306         return AS->aliasesPointer(Loc->Ptr, Loc->Size, Loc->AATags, *AA)
2307                != NoAlias;
2308       });
2309     } else {
2310       llvm::erase_if(Sets, [&](const AliasSet *AS) {
2311         return AS->aliasesUnknownInst(I, *AA);
2312       });
2313     }
2314   });
2315 
2316   SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2317   for (const AliasSet *Set : Sets) {
2318     SmallSetVector<Value *, 8> PointerMustAliases;
2319     for (const auto &ASI : *Set)
2320       PointerMustAliases.insert(ASI.getValue());
2321     Result.push_back(std::move(PointerMustAliases));
2322   }
2323 
2324   return Result;
2325 }
2326 
2327 /// Returns an owning pointer to an alias set which incorporates aliasing info
2328 /// from L and all subloops of L.
2329 std::unique_ptr<AliasSetTracker>
2330 LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2331                                                  AAResults *AA) {
2332   auto CurAST = std::make_unique<AliasSetTracker>(*AA);
2333 
2334   // Add everything from all the sub loops.
2335   for (Loop *InnerL : L->getSubLoops())
2336     for (BasicBlock *BB : InnerL->blocks())
2337       CurAST->add(*BB);
2338 
2339   // And merge in this loop (without anything from inner loops).
2340   for (BasicBlock *BB : L->blocks())
2341     if (LI->getLoopFor(BB) == L)
2342       CurAST->add(*BB);
2343 
2344   return CurAST;
2345 }
2346 
2347 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2348                                      AliasSetTracker *CurAST, Loop *CurLoop,
2349                                      AAResults *AA) {
2350   // First check to see if any of the basic blocks in CurLoop invalidate *V.
2351   bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2352 
2353   if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2354     return isInvalidatedAccordingToAST;
2355 
2356   // Check with a diagnostic analysis if we can refine the information above.
2357   // This is to identify the limitations of using the AST.
2358   // The alias set mechanism used by LICM has a major weakness in that it
2359   // combines all things which may alias into a single set *before* asking
2360   // modref questions. As a result, a single readonly call within a loop will
2361   // collapse all loads and stores into a single alias set and report
2362   // invalidation if the loop contains any store. For example, readonly calls
2363   // with deopt states have this form and create a general alias set with all
2364   // loads and stores.  In order to get any LICM in loops containing possible
2365   // deopt states we need a more precise invalidation of checking the mod ref
2366   // info of each instruction within the loop and LI. This has a complexity of
2367   // O(N^2), so currently, it is used only as a diagnostic tool since the
2368   // default value of LICMN2Threshold is zero.
2369 
2370   // Don't look at nested loops.
2371   if (CurLoop->begin() != CurLoop->end())
2372     return true;
2373 
2374   int N = 0;
2375   for (BasicBlock *BB : CurLoop->getBlocks())
2376     for (Instruction &I : *BB) {
2377       if (N >= LICMN2Theshold) {
2378         LLVM_DEBUG(dbgs() << "Alasing N2 threshold exhausted for "
2379                           << *(MemLoc.Ptr) << "\n");
2380         return true;
2381       }
2382       N++;
2383       auto Res = AA->getModRefInfo(&I, MemLoc);
2384       if (isModSet(Res)) {
2385         LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
2386                           << *(MemLoc.Ptr) << "\n");
2387         return true;
2388       }
2389     }
2390   LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2391   return false;
2392 }
2393 
2394 bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2395                                       Loop *CurLoop, Instruction &I,
2396                                       SinkAndHoistLICMFlags &Flags) {
2397   // For hoisting, use the walker to determine safety
2398   if (!Flags.getIsSink()) {
2399     MemoryAccess *Source;
2400     // See declaration of SetLicmMssaOptCap for usage details.
2401     if (Flags.tooManyClobberingCalls())
2402       Source = MU->getDefiningAccess();
2403     else {
2404       Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2405       Flags.incrementClobberingCalls();
2406     }
2407     return !MSSA->isLiveOnEntryDef(Source) &&
2408            CurLoop->contains(Source->getBlock());
2409   }
2410 
2411   // For sinking, we'd need to check all Defs below this use. The getClobbering
2412   // call will look on the backedge of the loop, but will check aliasing with
2413   // the instructions on the previous iteration.
2414   // For example:
2415   // for (i ... )
2416   //   load a[i] ( Use (LoE)
2417   //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2418   //   i++;
2419   // The load sees no clobbering inside the loop, as the backedge alias check
2420   // does phi translation, and will check aliasing against store a[i-1].
2421   // However sinking the load outside the loop, below the store is incorrect.
2422 
2423   // For now, only sink if there are no Defs in the loop, and the existing ones
2424   // precede the use and are in the same block.
2425   // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2426   // needs PostDominatorTreeAnalysis.
2427   // FIXME: More precise: no Defs that alias this Use.
2428   if (Flags.tooManyMemoryAccesses())
2429     return true;
2430   for (auto *BB : CurLoop->getBlocks())
2431     if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2432       return true;
2433   // When sinking, the source block may not be part of the loop so check it.
2434   if (!CurLoop->contains(&I))
2435     return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2436 
2437   return false;
2438 }
2439 
2440 bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2441                                        MemoryUse &MU) {
2442   if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2443     for (const auto &MA : *Accesses)
2444       if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2445         if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2446           return true;
2447   return false;
2448 }
2449 
2450 /// Little predicate that returns true if the specified basic block is in
2451 /// a subloop of the current one, not the current one itself.
2452 ///
2453 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2454   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2455   return LI->getLoopFor(BB) != CurLoop;
2456 }
2457