1 //===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs loop invariant code motion, attempting to remove as much
10 // code from the body of a loop as possible.  It does this by either hoisting
11 // code into the preheader block, or by sinking code to the exit blocks if it is
12 // safe.  This pass also promotes must-aliased memory locations in the loop to
13 // live in registers, thus hoisting and sinking "invariant" loads and stores.
14 //
15 // Hoisting operations out of loops is a canonicalization transform.  It
16 // enables and simplifies subsequent optimizations in the middle-end.
17 // Rematerialization of hoisted instructions to reduce register pressure is the
18 // responsibility of the back-end, which has more accurate information about
19 // register pressure and also handles other optimizations than LICM that
20 // increase live-ranges.
21 //
22 // This pass uses alias analysis for two purposes:
23 //
24 //  1. Moving loop invariant loads and calls out of loops.  If we can determine
25 //     that a load or call inside of a loop never aliases anything stored to,
26 //     we can hoist it or sink it like any other instruction.
27 //  2. Scalar Promotion of Memory - If there is a store instruction inside of
28 //     the loop, we try to move the store to happen AFTER the loop instead of
29 //     inside of the loop.  This can only happen if a few conditions are true:
30 //       A. The pointer stored through is loop invariant
31 //       B. There are no stores or loads in the loop which _may_ alias the
32 //          pointer.  There are no calls in the loop which mod/ref the pointer.
33 //     If these conditions are true, we can promote the loads and stores in the
34 //     loop of the pointer to use a temporary alloca'd variable.  We then use
35 //     the SSAUpdater to construct the appropriate SSA form for the value.
36 //
37 //===----------------------------------------------------------------------===//
38 
39 #include "llvm/Transforms/Scalar/LICM.h"
40 #include "llvm/ADT/SetOperations.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/Analysis/AliasAnalysis.h"
43 #include "llvm/Analysis/AliasSetTracker.h"
44 #include "llvm/Analysis/BasicAliasAnalysis.h"
45 #include "llvm/Analysis/BlockFrequencyInfo.h"
46 #include "llvm/Analysis/CaptureTracking.h"
47 #include "llvm/Analysis/ConstantFolding.h"
48 #include "llvm/Analysis/GlobalsModRef.h"
49 #include "llvm/Analysis/GuardUtils.h"
50 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51 #include "llvm/Analysis/Loads.h"
52 #include "llvm/Analysis/LoopInfo.h"
53 #include "llvm/Analysis/LoopIterator.h"
54 #include "llvm/Analysis/LoopPass.h"
55 #include "llvm/Analysis/MemoryBuiltins.h"
56 #include "llvm/Analysis/MemorySSA.h"
57 #include "llvm/Analysis/MemorySSAUpdater.h"
58 #include "llvm/Analysis/MustExecute.h"
59 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
60 #include "llvm/Analysis/ScalarEvolution.h"
61 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
62 #include "llvm/Analysis/TargetLibraryInfo.h"
63 #include "llvm/Analysis/ValueTracking.h"
64 #include "llvm/IR/CFG.h"
65 #include "llvm/IR/Constants.h"
66 #include "llvm/IR/DataLayout.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/Dominators.h"
70 #include "llvm/IR/Instructions.h"
71 #include "llvm/IR/IntrinsicInst.h"
72 #include "llvm/IR/LLVMContext.h"
73 #include "llvm/IR/Metadata.h"
74 #include "llvm/IR/PatternMatch.h"
75 #include "llvm/IR/PredIteratorCache.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Scalar/LoopPassManager.h"
82 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
83 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
84 #include "llvm/Transforms/Utils/Local.h"
85 #include "llvm/Transforms/Utils/LoopUtils.h"
86 #include "llvm/Transforms/Utils/SSAUpdater.h"
87 #include <algorithm>
88 #include <utility>
89 using namespace llvm;
90 
91 #define DEBUG_TYPE "licm"
92 
93 STATISTIC(NumCreatedBlocks, "Number of blocks created");
94 STATISTIC(NumClonedBranches, "Number of branches cloned");
95 STATISTIC(NumSunk, "Number of instructions sunk out of loop");
96 STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
97 STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
98 STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
99 STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
100 
101 /// Memory promotion is enabled by default.
102 static cl::opt<bool>
103     DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
104                      cl::desc("Disable memory promotion in LICM pass"));
105 
106 static cl::opt<bool> ControlFlowHoisting(
107     "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
108     cl::desc("Enable control flow (and PHI) hoisting in LICM"));
109 
110 static cl::opt<unsigned> HoistSinkColdnessThreshold(
111     "licm-coldness-threshold", cl::Hidden, cl::init(4),
112     cl::desc("Relative coldness Threshold of hoisting/sinking destination "
113              "block for LICM to be considered beneficial"));
114 
115 static cl::opt<uint32_t> MaxNumUsesTraversed(
116     "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
117     cl::desc("Max num uses visited for identifying load "
118              "invariance in loop using invariant start (default = 8)"));
119 
120 // Default value of zero implies we use the regular alias set tracker mechanism
121 // instead of the cross product using AA to identify aliasing of the memory
122 // location we are interested in.
123 static cl::opt<int>
124 LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
125                cl::desc("How many instruction to cross product using AA"));
126 
127 // Experimental option to allow imprecision in LICM in pathological cases, in
128 // exchange for faster compile. This is to be removed if MemorySSA starts to
129 // address the same issue. This flag applies only when LICM uses MemorySSA
130 // instead on AliasSetTracker. LICM calls MemorySSAWalker's
131 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect
132 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
133 // which may not be precise, since optimizeUses is capped. The result is
134 // correct, but we may not get as "far up" as possible to get which access is
135 // clobbering the one queried.
136 cl::opt<unsigned> llvm::SetLicmMssaOptCap(
137     "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
138     cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
139              "for faster compile. Caps the MemorySSA clobbering calls."));
140 
141 // Experimentally, memory promotion carries less importance than sinking and
142 // hoisting. Limit when we do promotion when using MemorySSA, in order to save
143 // compile time.
144 cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
145     "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
146     cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
147              "effect. When MSSA in LICM is enabled, then this is the maximum "
148              "number of accesses allowed to be present in a loop in order to "
149              "enable memory promotion."));
150 
151 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
152 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
153                                   const LoopSafetyInfo *SafetyInfo,
154                                   TargetTransformInfo *TTI, bool &FreeInLoop);
155 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
156                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
157                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
158                   OptimizationRemarkEmitter *ORE);
159 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
160                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
161                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
162                  OptimizationRemarkEmitter *ORE);
163 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
164                                            const DominatorTree *DT,
165                                            const TargetLibraryInfo *TLI,
166                                            const Loop *CurLoop,
167                                            const LoopSafetyInfo *SafetyInfo,
168                                            OptimizationRemarkEmitter *ORE,
169                                            const Instruction *CtxI = nullptr);
170 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
171                                      AliasSetTracker *CurAST, Loop *CurLoop,
172                                      AAResults *AA);
173 static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
174                                              Loop *CurLoop, Instruction &I,
175                                              SinkAndHoistLICMFlags &Flags);
176 static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
177                                               MemoryUse &MU);
178 static Instruction *cloneInstructionInExitBlock(
179     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
180     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
181 
182 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
183                              AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
184 
185 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
186                                   ICFLoopSafetyInfo &SafetyInfo,
187                                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
188 
189 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
190                                 function_ref<void(Instruction *)> Fn);
191 static SmallVector<SmallSetVector<Value *, 8>, 0>
192 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L,
193                            SmallVectorImpl<Instruction *> &MaybePromotable);
194 
195 namespace {
196 struct LoopInvariantCodeMotion {
197   bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
198                  BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
199                  TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
200                  OptimizationRemarkEmitter *ORE);
201 
202   LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
203                           unsigned LicmMssaNoAccForPromotionCap)
204       : LicmMssaOptCap(LicmMssaOptCap),
205         LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
206 
207 private:
208   unsigned LicmMssaOptCap;
209   unsigned LicmMssaNoAccForPromotionCap;
210 
211   std::unique_ptr<AliasSetTracker>
212   collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
213 };
214 
215 struct LegacyLICMPass : public LoopPass {
216   static char ID; // Pass identification, replacement for typeid
217   LegacyLICMPass(
218       unsigned LicmMssaOptCap = SetLicmMssaOptCap,
219       unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
220       : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
221     initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
222   }
223 
224   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
225     if (skipLoop(L))
226       return false;
227 
228     LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
229                       << L->getHeader()->getNameOrAsOperand() << "\n");
230 
231     auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
232     MemorySSA *MSSA = EnableMSSALoopDependency
233                           ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
234                           : nullptr;
235     bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
236     BlockFrequencyInfo *BFI =
237         hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
238                        : nullptr;
239     // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
240     // pass. Function analyses need to be preserved across loop transformations
241     // but ORE cannot be preserved (see comment before the pass definition).
242     OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
243     return LICM.runOnLoop(
244         L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
245         &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
246         &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
247         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
248             *L->getHeader()->getParent()),
249         &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
250             *L->getHeader()->getParent()),
251         SE ? &SE->getSE() : nullptr, MSSA, &ORE);
252   }
253 
254   /// This transformation requires natural loop information & requires that
255   /// loop preheaders be inserted into the CFG...
256   ///
257   void getAnalysisUsage(AnalysisUsage &AU) const override {
258     AU.addPreserved<DominatorTreeWrapperPass>();
259     AU.addPreserved<LoopInfoWrapperPass>();
260     AU.addRequired<TargetLibraryInfoWrapperPass>();
261     if (EnableMSSALoopDependency) {
262       AU.addRequired<MemorySSAWrapperPass>();
263       AU.addPreserved<MemorySSAWrapperPass>();
264     }
265     AU.addRequired<TargetTransformInfoWrapperPass>();
266     getLoopAnalysisUsage(AU);
267     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
268     AU.addPreserved<LazyBlockFrequencyInfoPass>();
269     AU.addPreserved<LazyBranchProbabilityInfoPass>();
270   }
271 
272 private:
273   LoopInvariantCodeMotion LICM;
274 };
275 } // namespace
276 
277 PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
278                                 LoopStandardAnalysisResults &AR, LPMUpdater &) {
279   // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
280   // pass.  Function analyses need to be preserved across loop transformations
281   // but ORE cannot be preserved (see comment before the pass definition).
282   OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
283 
284   LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
285   if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
286                       &AR.SE, AR.MSSA, &ORE))
287     return PreservedAnalyses::all();
288 
289   auto PA = getLoopPassPreservedAnalyses();
290 
291   PA.preserve<DominatorTreeAnalysis>();
292   PA.preserve<LoopAnalysis>();
293   if (AR.MSSA)
294     PA.preserve<MemorySSAAnalysis>();
295 
296   return PA;
297 }
298 
299 char LegacyLICMPass::ID = 0;
300 INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
301                       false, false)
302 INITIALIZE_PASS_DEPENDENCY(LoopPass)
303 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
304 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
305 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
306 INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
307 INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
308                     false)
309 
310 Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
311 Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
312                            unsigned LicmMssaNoAccForPromotionCap) {
313   return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
314 }
315 
316 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
317                                                    MemorySSA *MSSA)
318     : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
319                             IsSink, L, MSSA) {}
320 
321 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
322     unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
323     Loop *L, MemorySSA *MSSA)
324     : LicmMssaOptCap(LicmMssaOptCap),
325       LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
326       IsSink(IsSink) {
327   assert(((L != nullptr) == (MSSA != nullptr)) &&
328          "Unexpected values for SinkAndHoistLICMFlags");
329   if (!MSSA)
330     return;
331 
332   unsigned AccessCapCount = 0;
333   for (auto *BB : L->getBlocks())
334     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
335       for (const auto &MA : *Accesses) {
336         (void)MA;
337         ++AccessCapCount;
338         if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
339           NoOfMemAccTooLarge = true;
340           return;
341         }
342       }
343 }
344 
345 /// Hoist expressions out of the specified loop. Note, alias info for inner
346 /// loop is not preserved so it is not a good idea to run LICM multiple
347 /// times on one loop.
348 bool LoopInvariantCodeMotion::runOnLoop(
349     Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
350     BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
351     ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
352   bool Changed = false;
353 
354   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
355 
356   // If this loop has metadata indicating that LICM is not to be performed then
357   // just exit.
358   if (hasDisableLICMTransformsHint(L)) {
359     return false;
360   }
361 
362   std::unique_ptr<AliasSetTracker> CurAST;
363   std::unique_ptr<MemorySSAUpdater> MSSAU;
364   std::unique_ptr<SinkAndHoistLICMFlags> Flags;
365 
366   // Don't sink stores from loops with coroutine suspend instructions.
367   // LICM would sink instructions into the default destination of
368   // the coroutine switch. The default destination of the switch is to
369   // handle the case where the coroutine is suspended, by which point the
370   // coroutine frame may have been destroyed. No instruction can be sunk there.
371   // FIXME: This would unfortunately hurt the performance of coroutines, however
372   // there is currently no general solution for this. Similar issues could also
373   // potentially happen in other passes where instructions are being moved
374   // across that edge.
375   bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
376     return llvm::any_of(*BB, [](Instruction &I) {
377       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
378       return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
379     });
380   });
381 
382   if (!MSSA) {
383     LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
384     CurAST = collectAliasInfoForLoop(L, LI, AA);
385     Flags = std::make_unique<SinkAndHoistLICMFlags>(
386         LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true);
387   } else {
388     LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
389     MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
390     Flags = std::make_unique<SinkAndHoistLICMFlags>(
391         LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true, L, MSSA);
392   }
393 
394   // Get the preheader block to move instructions into...
395   BasicBlock *Preheader = L->getLoopPreheader();
396 
397   // Compute loop safety information.
398   ICFLoopSafetyInfo SafetyInfo;
399   SafetyInfo.computeLoopSafetyInfo(L);
400 
401   // We want to visit all of the instructions in this loop... that are not parts
402   // of our subloops (they have already had their invariants hoisted out of
403   // their loop, into this loop, so there is no need to process the BODIES of
404   // the subloops).
405   //
406   // Traverse the body of the loop in depth first order on the dominator tree so
407   // that we are guaranteed to see definitions before we see uses.  This allows
408   // us to sink instructions in one pass, without iteration.  After sinking
409   // instructions, we perform another pass to hoist them out of the loop.
410   if (L->hasDedicatedExits())
411     Changed |=
412         sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
413                    CurAST.get(), MSSAU.get(), &SafetyInfo, *Flags.get(), ORE);
414   Flags->setIsSink(false);
415   if (Preheader)
416     Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
417                            CurAST.get(), MSSAU.get(), SE, &SafetyInfo,
418                            *Flags.get(), ORE);
419 
420   // Now that all loop invariants have been removed from the loop, promote any
421   // memory references to scalars that we can.
422   // Don't sink stores from loops without dedicated block exits. Exits
423   // containing indirect branches are not transformed by loop simplify,
424   // make sure we catch that. An additional load may be generated in the
425   // preheader for SSA updater, so also avoid sinking when no preheader
426   // is available.
427   if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
428       !Flags->tooManyMemoryAccesses() && !HasCoroSuspendInst) {
429     // Figure out the loop exits and their insertion points
430     SmallVector<BasicBlock *, 8> ExitBlocks;
431     L->getUniqueExitBlocks(ExitBlocks);
432 
433     // We can't insert into a catchswitch.
434     bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
435       return isa<CatchSwitchInst>(Exit->getTerminator());
436     });
437 
438     if (!HasCatchSwitch) {
439       SmallVector<Instruction *, 8> InsertPts;
440       SmallVector<MemoryAccess *, 8> MSSAInsertPts;
441       InsertPts.reserve(ExitBlocks.size());
442       if (MSSAU)
443         MSSAInsertPts.reserve(ExitBlocks.size());
444       for (BasicBlock *ExitBlock : ExitBlocks) {
445         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
446         if (MSSAU)
447           MSSAInsertPts.push_back(nullptr);
448       }
449 
450       PredIteratorCache PIC;
451 
452       bool Promoted = false;
453       if (CurAST.get()) {
454         // Loop over all of the alias sets in the tracker object.
455         for (AliasSet &AS : *CurAST) {
456           // We can promote this alias set if it has a store, if it is a "Must"
457           // alias set, if the pointer is loop invariant, and if we are not
458           // eliminating any volatile loads or stores.
459           if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
460               !L->isLoopInvariant(AS.begin()->getValue()))
461             continue;
462 
463           assert(
464               !AS.empty() &&
465               "Must alias set should have at least one pointer element in it!");
466 
467           SmallSetVector<Value *, 8> PointerMustAliases;
468           for (const auto &ASI : AS)
469             PointerMustAliases.insert(ASI.getValue());
470 
471           Promoted |= promoteLoopAccessesToScalars(
472               PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
473               DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
474         }
475       } else {
476         SmallVector<Instruction *, 16> MaybePromotable;
477         foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
478           MaybePromotable.push_back(I);
479         });
480 
481         // Promoting one set of accesses may make the pointers for another set
482         // loop invariant, so run this in a loop (with the MaybePromotable set
483         // decreasing in size over time).
484         bool LocalPromoted;
485         do {
486           LocalPromoted = false;
487           for (const SmallSetVector<Value *, 8> &PointerMustAliases :
488                collectPromotionCandidates(MSSA, AA, L, MaybePromotable)) {
489             LocalPromoted |= promoteLoopAccessesToScalars(
490                 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
491                 LI, DT, TLI, L, /*AST*/nullptr, MSSAU.get(), &SafetyInfo, ORE);
492           }
493           Promoted |= LocalPromoted;
494         } while (LocalPromoted);
495       }
496 
497       // Once we have promoted values across the loop body we have to
498       // recursively reform LCSSA as any nested loop may now have values defined
499       // within the loop used in the outer loop.
500       // FIXME: This is really heavy handed. It would be a bit better to use an
501       // SSAUpdater strategy during promotion that was LCSSA aware and reformed
502       // it as it went.
503       if (Promoted)
504         formLCSSARecursively(*L, *DT, LI, SE);
505 
506       Changed |= Promoted;
507     }
508   }
509 
510   // Check that neither this loop nor its parent have had LCSSA broken. LICM is
511   // specifically moving instructions across the loop boundary and so it is
512   // especially in need of sanity checking here.
513   assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
514   assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
515          "Parent loop not left in LCSSA form after LICM!");
516 
517   if (MSSAU.get() && VerifyMemorySSA)
518     MSSAU->getMemorySSA()->verifyMemorySSA();
519 
520   if (Changed && SE)
521     SE->forgetLoopDispositions(L);
522   return Changed;
523 }
524 
525 /// Walk the specified region of the CFG (defined by all blocks dominated by
526 /// the specified block, and that are in the current loop) in reverse depth
527 /// first order w.r.t the DominatorTree.  This allows us to visit uses before
528 /// definitions, allowing us to sink a loop body in one pass without iteration.
529 ///
530 bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
531                       DominatorTree *DT, BlockFrequencyInfo *BFI,
532                       TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
533                       Loop *CurLoop, AliasSetTracker *CurAST,
534                       MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
535                       SinkAndHoistLICMFlags &Flags,
536                       OptimizationRemarkEmitter *ORE) {
537 
538   // Verify inputs.
539   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
540          CurLoop != nullptr && SafetyInfo != nullptr &&
541          "Unexpected input to sinkRegion.");
542   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
543          "Either AliasSetTracker or MemorySSA should be initialized.");
544 
545   // We want to visit children before parents. We will enque all the parents
546   // before their children in the worklist and process the worklist in reverse
547   // order.
548   SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
549 
550   bool Changed = false;
551   for (DomTreeNode *DTN : reverse(Worklist)) {
552     BasicBlock *BB = DTN->getBlock();
553     // Only need to process the contents of this block if it is not part of a
554     // subloop (which would already have been processed).
555     if (inSubLoop(BB, CurLoop, LI))
556       continue;
557 
558     for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
559       Instruction &I = *--II;
560 
561       // The instruction is not used in the loop if it is dead.  In this case,
562       // we just delete it instead of sinking it.
563       if (isInstructionTriviallyDead(&I, TLI)) {
564         LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
565         salvageKnowledge(&I);
566         salvageDebugInfo(I);
567         ++II;
568         eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
569         Changed = true;
570         continue;
571       }
572 
573       // Check to see if we can sink this instruction to the exit blocks
574       // of the loop.  We can do this if the all users of the instruction are
575       // outside of the loop.  In this case, it doesn't even matter if the
576       // operands of the instruction are loop invariant.
577       //
578       bool FreeInLoop = false;
579       if (!I.mayHaveSideEffects() &&
580           isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
581           canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
582                              ORE)) {
583         if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
584           if (!FreeInLoop) {
585             ++II;
586             salvageDebugInfo(I);
587             eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
588           }
589           Changed = true;
590         }
591       }
592     }
593   }
594   if (MSSAU && VerifyMemorySSA)
595     MSSAU->getMemorySSA()->verifyMemorySSA();
596   return Changed;
597 }
598 
599 namespace {
600 // This is a helper class for hoistRegion to make it able to hoist control flow
601 // in order to be able to hoist phis. The way this works is that we initially
602 // start hoisting to the loop preheader, and when we see a loop invariant branch
603 // we make note of this. When we then come to hoist an instruction that's
604 // conditional on such a branch we duplicate the branch and the relevant control
605 // flow, then hoist the instruction into the block corresponding to its original
606 // block in the duplicated control flow.
607 class ControlFlowHoister {
608 private:
609   // Information about the loop we are hoisting from
610   LoopInfo *LI;
611   DominatorTree *DT;
612   Loop *CurLoop;
613   MemorySSAUpdater *MSSAU;
614 
615   // A map of blocks in the loop to the block their instructions will be hoisted
616   // to.
617   DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
618 
619   // The branches that we can hoist, mapped to the block that marks a
620   // convergence point of their control flow.
621   DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
622 
623 public:
624   ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
625                      MemorySSAUpdater *MSSAU)
626       : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
627 
628   void registerPossiblyHoistableBranch(BranchInst *BI) {
629     // We can only hoist conditional branches with loop invariant operands.
630     if (!ControlFlowHoisting || !BI->isConditional() ||
631         !CurLoop->hasLoopInvariantOperands(BI))
632       return;
633 
634     // The branch destinations need to be in the loop, and we don't gain
635     // anything by duplicating conditional branches with duplicate successors,
636     // as it's essentially the same as an unconditional branch.
637     BasicBlock *TrueDest = BI->getSuccessor(0);
638     BasicBlock *FalseDest = BI->getSuccessor(1);
639     if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
640         TrueDest == FalseDest)
641       return;
642 
643     // We can hoist BI if one branch destination is the successor of the other,
644     // or both have common successor which we check by seeing if the
645     // intersection of their successors is non-empty.
646     // TODO: This could be expanded to allowing branches where both ends
647     // eventually converge to a single block.
648     SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
649     TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
650     FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
651     BasicBlock *CommonSucc = nullptr;
652     if (TrueDestSucc.count(FalseDest)) {
653       CommonSucc = FalseDest;
654     } else if (FalseDestSucc.count(TrueDest)) {
655       CommonSucc = TrueDest;
656     } else {
657       set_intersect(TrueDestSucc, FalseDestSucc);
658       // If there's one common successor use that.
659       if (TrueDestSucc.size() == 1)
660         CommonSucc = *TrueDestSucc.begin();
661       // If there's more than one pick whichever appears first in the block list
662       // (we can't use the value returned by TrueDestSucc.begin() as it's
663       // unpredicatable which element gets returned).
664       else if (!TrueDestSucc.empty()) {
665         Function *F = TrueDest->getParent();
666         auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
667         auto It = llvm::find_if(*F, IsSucc);
668         assert(It != F->end() && "Could not find successor in function");
669         CommonSucc = &*It;
670       }
671     }
672     // The common successor has to be dominated by the branch, as otherwise
673     // there will be some other path to the successor that will not be
674     // controlled by this branch so any phi we hoist would be controlled by the
675     // wrong condition. This also takes care of avoiding hoisting of loop back
676     // edges.
677     // TODO: In some cases this could be relaxed if the successor is dominated
678     // by another block that's been hoisted and we can guarantee that the
679     // control flow has been replicated exactly.
680     if (CommonSucc && DT->dominates(BI, CommonSucc))
681       HoistableBranches[BI] = CommonSucc;
682   }
683 
684   bool canHoistPHI(PHINode *PN) {
685     // The phi must have loop invariant operands.
686     if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
687       return false;
688     // We can hoist phis if the block they are in is the target of hoistable
689     // branches which cover all of the predecessors of the block.
690     SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
691     BasicBlock *BB = PN->getParent();
692     for (BasicBlock *PredBB : predecessors(BB))
693       PredecessorBlocks.insert(PredBB);
694     // If we have less predecessor blocks than predecessors then the phi will
695     // have more than one incoming value for the same block which we can't
696     // handle.
697     // TODO: This could be handled be erasing some of the duplicate incoming
698     // values.
699     if (PredecessorBlocks.size() != pred_size(BB))
700       return false;
701     for (auto &Pair : HoistableBranches) {
702       if (Pair.second == BB) {
703         // Which blocks are predecessors via this branch depends on if the
704         // branch is triangle-like or diamond-like.
705         if (Pair.first->getSuccessor(0) == BB) {
706           PredecessorBlocks.erase(Pair.first->getParent());
707           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
708         } else if (Pair.first->getSuccessor(1) == BB) {
709           PredecessorBlocks.erase(Pair.first->getParent());
710           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
711         } else {
712           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
713           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
714         }
715       }
716     }
717     // PredecessorBlocks will now be empty if for every predecessor of BB we
718     // found a hoistable branch source.
719     return PredecessorBlocks.empty();
720   }
721 
722   BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
723     if (!ControlFlowHoisting)
724       return CurLoop->getLoopPreheader();
725     // If BB has already been hoisted, return that
726     if (HoistDestinationMap.count(BB))
727       return HoistDestinationMap[BB];
728 
729     // Check if this block is conditional based on a pending branch
730     auto HasBBAsSuccessor =
731         [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
732           return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
733                                        Pair.first->getSuccessor(1) == BB);
734         };
735     auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
736 
737     // If not involved in a pending branch, hoist to preheader
738     BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
739     if (It == HoistableBranches.end()) {
740       LLVM_DEBUG(dbgs() << "LICM using "
741                         << InitialPreheader->getNameOrAsOperand()
742                         << " as hoist destination for "
743                         << BB->getNameOrAsOperand() << "\n");
744       HoistDestinationMap[BB] = InitialPreheader;
745       return InitialPreheader;
746     }
747     BranchInst *BI = It->first;
748     assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
749                HoistableBranches.end() &&
750            "BB is expected to be the target of at most one branch");
751 
752     LLVMContext &C = BB->getContext();
753     BasicBlock *TrueDest = BI->getSuccessor(0);
754     BasicBlock *FalseDest = BI->getSuccessor(1);
755     BasicBlock *CommonSucc = HoistableBranches[BI];
756     BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
757 
758     // Create hoisted versions of blocks that currently don't have them
759     auto CreateHoistedBlock = [&](BasicBlock *Orig) {
760       if (HoistDestinationMap.count(Orig))
761         return HoistDestinationMap[Orig];
762       BasicBlock *New =
763           BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
764       HoistDestinationMap[Orig] = New;
765       DT->addNewBlock(New, HoistTarget);
766       if (CurLoop->getParentLoop())
767         CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
768       ++NumCreatedBlocks;
769       LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
770                         << " as hoist destination for " << Orig->getName()
771                         << "\n");
772       return New;
773     };
774     BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
775     BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
776     BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
777 
778     // Link up these blocks with branches.
779     if (!HoistCommonSucc->getTerminator()) {
780       // The new common successor we've generated will branch to whatever that
781       // hoist target branched to.
782       BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
783       assert(TargetSucc && "Expected hoist target to have a single successor");
784       HoistCommonSucc->moveBefore(TargetSucc);
785       BranchInst::Create(TargetSucc, HoistCommonSucc);
786     }
787     if (!HoistTrueDest->getTerminator()) {
788       HoistTrueDest->moveBefore(HoistCommonSucc);
789       BranchInst::Create(HoistCommonSucc, HoistTrueDest);
790     }
791     if (!HoistFalseDest->getTerminator()) {
792       HoistFalseDest->moveBefore(HoistCommonSucc);
793       BranchInst::Create(HoistCommonSucc, HoistFalseDest);
794     }
795 
796     // If BI is being cloned to what was originally the preheader then
797     // HoistCommonSucc will now be the new preheader.
798     if (HoistTarget == InitialPreheader) {
799       // Phis in the loop header now need to use the new preheader.
800       InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
801       if (MSSAU)
802         MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
803             HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
804       // The new preheader dominates the loop header.
805       DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
806       DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
807       DT->changeImmediateDominator(HeaderNode, PreheaderNode);
808       // The preheader hoist destination is now the new preheader, with the
809       // exception of the hoist destination of this branch.
810       for (auto &Pair : HoistDestinationMap)
811         if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
812           Pair.second = HoistCommonSucc;
813     }
814 
815     // Now finally clone BI.
816     ReplaceInstWithInst(
817         HoistTarget->getTerminator(),
818         BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
819     ++NumClonedBranches;
820 
821     assert(CurLoop->getLoopPreheader() &&
822            "Hoisting blocks should not have destroyed preheader");
823     return HoistDestinationMap[BB];
824   }
825 };
826 } // namespace
827 
828 // Hoisting/sinking instruction out of a loop isn't always beneficial. It's only
829 // only worthwhile if the destination block is actually colder than current
830 // block.
831 static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
832                                  OptimizationRemarkEmitter *ORE,
833                                  BlockFrequencyInfo *BFI) {
834   // Check block frequency only when runtime profile is available
835   // to avoid pathological cases. With static profile, lean towards
836   // hosting because it helps canonicalize the loop for vectorizer.
837   if (!DstBlock->getParent()->hasProfileData())
838     return true;
839 
840   if (!HoistSinkColdnessThreshold || !BFI)
841     return true;
842 
843   BasicBlock *SrcBlock = I.getParent();
844   if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
845       BFI->getBlockFreq(SrcBlock).getFrequency()) {
846     ORE->emit([&]() {
847       return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
848              << "failed to sink or hoist instruction because containing block "
849                 "has lower frequency than destination block";
850     });
851     return false;
852   }
853 
854   return true;
855 }
856 
857 /// Walk the specified region of the CFG (defined by all blocks dominated by
858 /// the specified block, and that are in the current loop) in depth first
859 /// order w.r.t the DominatorTree.  This allows us to visit definitions before
860 /// uses, allowing us to hoist a loop body in one pass without iteration.
861 ///
862 bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
863                        DominatorTree *DT, BlockFrequencyInfo *BFI,
864                        TargetLibraryInfo *TLI, Loop *CurLoop,
865                        AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
866                        ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
867                        SinkAndHoistLICMFlags &Flags,
868                        OptimizationRemarkEmitter *ORE) {
869   // Verify inputs.
870   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
871          CurLoop != nullptr && SafetyInfo != nullptr &&
872          "Unexpected input to hoistRegion.");
873   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
874          "Either AliasSetTracker or MemorySSA should be initialized.");
875 
876   ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
877 
878   // Keep track of instructions that have been hoisted, as they may need to be
879   // re-hoisted if they end up not dominating all of their uses.
880   SmallVector<Instruction *, 16> HoistedInstructions;
881 
882   // For PHI hoisting to work we need to hoist blocks before their successors.
883   // We can do this by iterating through the blocks in the loop in reverse
884   // post-order.
885   LoopBlocksRPO Worklist(CurLoop);
886   Worklist.perform(LI);
887   bool Changed = false;
888   for (BasicBlock *BB : Worklist) {
889     // Only need to process the contents of this block if it is not part of a
890     // subloop (which would already have been processed).
891     if (inSubLoop(BB, CurLoop, LI))
892       continue;
893 
894     for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
895       Instruction &I = *II++;
896       // Try constant folding this instruction.  If all the operands are
897       // constants, it is technically hoistable, but it would be better to
898       // just fold it.
899       if (Constant *C = ConstantFoldInstruction(
900               &I, I.getModule()->getDataLayout(), TLI)) {
901         LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
902                           << '\n');
903         if (CurAST)
904           CurAST->copyValue(&I, C);
905         // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
906         I.replaceAllUsesWith(C);
907         if (isInstructionTriviallyDead(&I, TLI))
908           eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
909         Changed = true;
910         continue;
911       }
912 
913       // Try hoisting the instruction out to the preheader.  We can only do
914       // this if all of the operands of the instruction are loop invariant and
915       // if it is safe to hoist the instruction. We also check block frequency
916       // to make sure instruction only gets hoisted into colder blocks.
917       // TODO: It may be safe to hoist if we are hoisting to a conditional block
918       // and we have accurately duplicated the control flow from the loop header
919       // to that block.
920       if (CurLoop->hasLoopInvariantOperands(&I) &&
921           canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
922                              ORE) &&
923           worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
924           isSafeToExecuteUnconditionally(
925               I, DT, TLI, CurLoop, SafetyInfo, ORE,
926               CurLoop->getLoopPreheader()->getTerminator())) {
927         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
928               MSSAU, SE, ORE);
929         HoistedInstructions.push_back(&I);
930         Changed = true;
931         continue;
932       }
933 
934       // Attempt to remove floating point division out of the loop by
935       // converting it to a reciprocal multiplication.
936       if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
937           CurLoop->isLoopInvariant(I.getOperand(1))) {
938         auto Divisor = I.getOperand(1);
939         auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
940         auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
941         ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
942         SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
943         ReciprocalDivisor->insertBefore(&I);
944 
945         auto Product =
946             BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
947         Product->setFastMathFlags(I.getFastMathFlags());
948         SafetyInfo->insertInstructionTo(Product, I.getParent());
949         Product->insertAfter(&I);
950         I.replaceAllUsesWith(Product);
951         eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
952 
953         hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
954               SafetyInfo, MSSAU, SE, ORE);
955         HoistedInstructions.push_back(ReciprocalDivisor);
956         Changed = true;
957         continue;
958       }
959 
960       auto IsInvariantStart = [&](Instruction &I) {
961         using namespace PatternMatch;
962         return I.use_empty() &&
963                match(&I, m_Intrinsic<Intrinsic::invariant_start>());
964       };
965       auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
966         return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
967                SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
968       };
969       if ((IsInvariantStart(I) || isGuard(&I)) &&
970           CurLoop->hasLoopInvariantOperands(&I) &&
971           MustExecuteWithoutWritesBefore(I)) {
972         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
973               MSSAU, SE, ORE);
974         HoistedInstructions.push_back(&I);
975         Changed = true;
976         continue;
977       }
978 
979       if (PHINode *PN = dyn_cast<PHINode>(&I)) {
980         if (CFH.canHoistPHI(PN)) {
981           // Redirect incoming blocks first to ensure that we create hoisted
982           // versions of those blocks before we hoist the phi.
983           for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
984             PN->setIncomingBlock(
985                 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
986           hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
987                 MSSAU, SE, ORE);
988           assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
989           Changed = true;
990           continue;
991         }
992       }
993 
994       // Remember possibly hoistable branches so we can actually hoist them
995       // later if needed.
996       if (BranchInst *BI = dyn_cast<BranchInst>(&I))
997         CFH.registerPossiblyHoistableBranch(BI);
998     }
999   }
1000 
1001   // If we hoisted instructions to a conditional block they may not dominate
1002   // their uses that weren't hoisted (such as phis where some operands are not
1003   // loop invariant). If so make them unconditional by moving them to their
1004   // immediate dominator. We iterate through the instructions in reverse order
1005   // which ensures that when we rehoist an instruction we rehoist its operands,
1006   // and also keep track of where in the block we are rehoisting to to make sure
1007   // that we rehoist instructions before the instructions that use them.
1008   Instruction *HoistPoint = nullptr;
1009   if (ControlFlowHoisting) {
1010     for (Instruction *I : reverse(HoistedInstructions)) {
1011       if (!llvm::all_of(I->uses(),
1012                         [&](Use &U) { return DT->dominates(I, U); })) {
1013         BasicBlock *Dominator =
1014             DT->getNode(I->getParent())->getIDom()->getBlock();
1015         if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1016           if (HoistPoint)
1017             assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1018                    "New hoist point expected to dominate old hoist point");
1019           HoistPoint = Dominator->getTerminator();
1020         }
1021         LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1022                           << HoistPoint->getParent()->getNameOrAsOperand()
1023                           << ": " << *I << "\n");
1024         moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1025         HoistPoint = I;
1026         Changed = true;
1027       }
1028     }
1029   }
1030   if (MSSAU && VerifyMemorySSA)
1031     MSSAU->getMemorySSA()->verifyMemorySSA();
1032 
1033     // Now that we've finished hoisting make sure that LI and DT are still
1034     // valid.
1035 #ifdef EXPENSIVE_CHECKS
1036   if (Changed) {
1037     assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1038            "Dominator tree verification failed");
1039     LI->verify(*DT);
1040   }
1041 #endif
1042 
1043   return Changed;
1044 }
1045 
1046 // Return true if LI is invariant within scope of the loop. LI is invariant if
1047 // CurLoop is dominated by an invariant.start representing the same memory
1048 // location and size as the memory location LI loads from, and also the
1049 // invariant.start has no uses.
1050 static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1051                                   Loop *CurLoop) {
1052   Value *Addr = LI->getOperand(0);
1053   const DataLayout &DL = LI->getModule()->getDataLayout();
1054   const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1055 
1056   // It is not currently possible for clang to generate an invariant.start
1057   // intrinsic with scalable vector types because we don't support thread local
1058   // sizeless types and we don't permit sizeless types in structs or classes.
1059   // Furthermore, even if support is added for this in future the intrinsic
1060   // itself is defined to have a size of -1 for variable sized objects. This
1061   // makes it impossible to verify if the intrinsic envelops our region of
1062   // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1063   // types would have a -1 parameter, but the former is clearly double the size
1064   // of the latter.
1065   if (LocSizeInBits.isScalable())
1066     return false;
1067 
1068   // if the type is i8 addrspace(x)*, we know this is the type of
1069   // llvm.invariant.start operand
1070   auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1071                                      LI->getPointerAddressSpace());
1072   unsigned BitcastsVisited = 0;
1073   // Look through bitcasts until we reach the i8* type (this is invariant.start
1074   // operand type).
1075   while (Addr->getType() != PtrInt8Ty) {
1076     auto *BC = dyn_cast<BitCastInst>(Addr);
1077     // Avoid traversing high number of bitcast uses.
1078     if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1079       return false;
1080     Addr = BC->getOperand(0);
1081   }
1082 
1083   unsigned UsesVisited = 0;
1084   // Traverse all uses of the load operand value, to see if invariant.start is
1085   // one of the uses, and whether it dominates the load instruction.
1086   for (auto *U : Addr->users()) {
1087     // Avoid traversing for Load operand with high number of users.
1088     if (++UsesVisited > MaxNumUsesTraversed)
1089       return false;
1090     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1091     // If there are escaping uses of invariant.start instruction, the load maybe
1092     // non-invariant.
1093     if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1094         !II->use_empty())
1095       continue;
1096     ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1097     // The intrinsic supports having a -1 argument for variable sized objects
1098     // so we should check for that here.
1099     if (InvariantSize->isNegative())
1100       continue;
1101     uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1102     // Confirm the invariant.start location size contains the load operand size
1103     // in bits. Also, the invariant.start should dominate the load, and we
1104     // should not hoist the load out of a loop that contains this dominating
1105     // invariant.start.
1106     if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1107         DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1108       return true;
1109   }
1110 
1111   return false;
1112 }
1113 
1114 namespace {
1115 /// Return true if-and-only-if we know how to (mechanically) both hoist and
1116 /// sink a given instruction out of a loop.  Does not address legality
1117 /// concerns such as aliasing or speculation safety.
1118 bool isHoistableAndSinkableInst(Instruction &I) {
1119   // Only these instructions are hoistable/sinkable.
1120   return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
1121           isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1122           isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1123           isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1124           isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1125           isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1126           isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1127 }
1128 /// Return true if all of the alias sets within this AST are known not to
1129 /// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1130 bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1131                 const Loop *L) {
1132   if (CurAST) {
1133     for (AliasSet &AS : *CurAST) {
1134       if (!AS.isForwardingAliasSet() && AS.isMod()) {
1135         return false;
1136       }
1137     }
1138     return true;
1139   } else { /*MSSAU*/
1140     for (auto *BB : L->getBlocks())
1141       if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1142         return false;
1143     return true;
1144   }
1145 }
1146 
1147 /// Return true if I is the only Instruction with a MemoryAccess in L.
1148 bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1149                         const MemorySSAUpdater *MSSAU) {
1150   for (auto *BB : L->getBlocks())
1151     if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1152       int NotAPhi = 0;
1153       for (const auto &Acc : *Accs) {
1154         if (isa<MemoryPhi>(&Acc))
1155           continue;
1156         const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1157         if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1158           return false;
1159       }
1160     }
1161   return true;
1162 }
1163 }
1164 
1165 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1166                               Loop *CurLoop, AliasSetTracker *CurAST,
1167                               MemorySSAUpdater *MSSAU,
1168                               bool TargetExecutesOncePerLoop,
1169                               SinkAndHoistLICMFlags *Flags,
1170                               OptimizationRemarkEmitter *ORE) {
1171   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
1172          "Either AliasSetTracker or MemorySSA should be initialized.");
1173 
1174   // If we don't understand the instruction, bail early.
1175   if (!isHoistableAndSinkableInst(I))
1176     return false;
1177 
1178   MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
1179   if (MSSA)
1180     assert(Flags != nullptr && "Flags cannot be null.");
1181 
1182   // Loads have extra constraints we have to verify before we can hoist them.
1183   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1184     if (!LI->isUnordered())
1185       return false; // Don't sink/hoist volatile or ordered atomic loads!
1186 
1187     // Loads from constant memory are always safe to move, even if they end up
1188     // in the same alias set as something that ends up being modified.
1189     if (AA->pointsToConstantMemory(LI->getOperand(0)))
1190       return true;
1191     if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1192       return true;
1193 
1194     if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1195       return false; // Don't risk duplicating unordered loads
1196 
1197     // This checks for an invariant.start dominating the load.
1198     if (isLoadInvariantInLoop(LI, DT, CurLoop))
1199       return true;
1200 
1201     // Stores with an invariant.group metadata are ok to sink/hoist.
1202     if (LI->hasMetadata(LLVMContext::MD_invariant_group))
1203       return true;
1204 
1205     bool Invalidated;
1206     if (CurAST)
1207       Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1208                                              CurLoop, AA);
1209     else
1210       Invalidated = pointerInvalidatedByLoopWithMSSA(
1211           MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
1212     // Check loop-invariant address because this may also be a sinkable load
1213     // whose address is not necessarily loop-invariant.
1214     if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1215       ORE->emit([&]() {
1216         return OptimizationRemarkMissed(
1217                    DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1218                << "failed to move load with loop-invariant address "
1219                   "because the loop may invalidate its value";
1220       });
1221 
1222     return !Invalidated;
1223   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1224     // Don't sink or hoist dbg info; it's legal, but not useful.
1225     if (isa<DbgInfoIntrinsic>(I))
1226       return false;
1227 
1228     // Don't sink calls which can throw.
1229     if (CI->mayThrow())
1230       return false;
1231 
1232     // Convergent attribute has been used on operations that involve
1233     // inter-thread communication which results are implicitly affected by the
1234     // enclosing control flows. It is not safe to hoist or sink such operations
1235     // across control flow.
1236     if (CI->isConvergent())
1237       return false;
1238 
1239     using namespace PatternMatch;
1240     if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1241       // Assumes don't actually alias anything or throw
1242       return true;
1243 
1244     if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1245       // Widenable conditions don't actually alias anything or throw
1246       return true;
1247 
1248     // Handle simple cases by querying alias analysis.
1249     FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1250     if (Behavior == FMRB_DoesNotAccessMemory)
1251       return true;
1252     if (AAResults::onlyReadsMemory(Behavior)) {
1253       // A readonly argmemonly function only reads from memory pointed to by
1254       // it's arguments with arbitrary offsets.  If we can prove there are no
1255       // writes to this memory in the loop, we can hoist or sink.
1256       if (AAResults::onlyAccessesArgPointees(Behavior)) {
1257         // TODO: expand to writeable arguments
1258         for (Value *Op : CI->arg_operands())
1259           if (Op->getType()->isPointerTy()) {
1260             bool Invalidated;
1261             if (CurAST)
1262               Invalidated = pointerInvalidatedByLoop(
1263                   MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1264             else
1265               Invalidated = pointerInvalidatedByLoopWithMSSA(
1266                   MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1267                   *Flags);
1268             if (Invalidated)
1269               return false;
1270           }
1271         return true;
1272       }
1273 
1274       // If this call only reads from memory and there are no writes to memory
1275       // in the loop, we can hoist or sink the call as appropriate.
1276       if (isReadOnly(CurAST, MSSAU, CurLoop))
1277         return true;
1278     }
1279 
1280     // FIXME: This should use mod/ref information to see if we can hoist or
1281     // sink the call.
1282 
1283     return false;
1284   } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1285     // Fences alias (most) everything to provide ordering.  For the moment,
1286     // just give up if there are any other memory operations in the loop.
1287     if (CurAST) {
1288       auto Begin = CurAST->begin();
1289       assert(Begin != CurAST->end() && "must contain FI");
1290       if (std::next(Begin) != CurAST->end())
1291         // constant memory for instance, TODO: handle better
1292         return false;
1293       auto *UniqueI = Begin->getUniqueInstruction();
1294       if (!UniqueI)
1295         // other memory op, give up
1296         return false;
1297       (void)FI; // suppress unused variable warning
1298       assert(UniqueI == FI && "AS must contain FI");
1299       return true;
1300     } else // MSSAU
1301       return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1302   } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1303     if (!SI->isUnordered())
1304       return false; // Don't sink/hoist volatile or ordered atomic store!
1305 
1306     // We can only hoist a store that we can prove writes a value which is not
1307     // read or overwritten within the loop.  For those cases, we fallback to
1308     // load store promotion instead.  TODO: We can extend this to cases where
1309     // there is exactly one write to the location and that write dominates an
1310     // arbitrary number of reads in the loop.
1311     if (CurAST) {
1312       auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1313 
1314       if (AS.isRef() || !AS.isMustAlias())
1315         // Quick exit test, handled by the full path below as well.
1316         return false;
1317       auto *UniqueI = AS.getUniqueInstruction();
1318       if (!UniqueI)
1319         // other memory op, give up
1320         return false;
1321       assert(UniqueI == SI && "AS must contain SI");
1322       return true;
1323     } else { // MSSAU
1324       if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1325         return true;
1326       // If there are more accesses than the Promotion cap or no "quota" to
1327       // check clobber, then give up as we're not walking a list that long.
1328       if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1329         return false;
1330       // If there are interfering Uses (i.e. their defining access is in the
1331       // loop), or ordered loads (stored as Defs!), don't move this store.
1332       // Could do better here, but this is conservatively correct.
1333       // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1334       // moving accesses. Can also extend to dominating uses.
1335       auto *SIMD = MSSA->getMemoryAccess(SI);
1336       for (auto *BB : CurLoop->getBlocks())
1337         if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1338           for (const auto &MA : *Accesses)
1339             if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1340               auto *MD = MU->getDefiningAccess();
1341               if (!MSSA->isLiveOnEntryDef(MD) &&
1342                   CurLoop->contains(MD->getBlock()))
1343                 return false;
1344               // Disable hoisting past potentially interfering loads. Optimized
1345               // Uses may point to an access outside the loop, as getClobbering
1346               // checks the previous iteration when walking the backedge.
1347               // FIXME: More precise: no Uses that alias SI.
1348               if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1349                 return false;
1350             } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1351               if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1352                 (void)LI; // Silence warning.
1353                 assert(!LI->isUnordered() && "Expected unordered load");
1354                 return false;
1355               }
1356               // Any call, while it may not be clobbering SI, it may be a use.
1357               if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1358                 // Check if the call may read from the memory location written
1359                 // to by SI. Check CI's attributes and arguments; the number of
1360                 // such checks performed is limited above by NoOfMemAccTooLarge.
1361                 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1362                 if (isModOrRefSet(MRI))
1363                   return false;
1364               }
1365             }
1366         }
1367       auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1368       Flags->incrementClobberingCalls();
1369       // If there are no clobbering Defs in the loop, store is safe to hoist.
1370       return MSSA->isLiveOnEntryDef(Source) ||
1371              !CurLoop->contains(Source->getBlock());
1372     }
1373   }
1374 
1375   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1376 
1377   // We've established mechanical ability and aliasing, it's up to the caller
1378   // to check fault safety
1379   return true;
1380 }
1381 
1382 /// Returns true if a PHINode is a trivially replaceable with an
1383 /// Instruction.
1384 /// This is true when all incoming values are that instruction.
1385 /// This pattern occurs most often with LCSSA PHI nodes.
1386 ///
1387 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1388   for (const Value *IncValue : PN.incoming_values())
1389     if (IncValue != &I)
1390       return false;
1391 
1392   return true;
1393 }
1394 
1395 /// Return true if the instruction is free in the loop.
1396 static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1397                          const TargetTransformInfo *TTI) {
1398 
1399   if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1400     if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1401         TargetTransformInfo::TCC_Free)
1402       return false;
1403     // For a GEP, we cannot simply use getUserCost because currently it
1404     // optimistically assume that a GEP will fold into addressing mode
1405     // regardless of its users.
1406     const BasicBlock *BB = GEP->getParent();
1407     for (const User *U : GEP->users()) {
1408       const Instruction *UI = cast<Instruction>(U);
1409       if (CurLoop->contains(UI) &&
1410           (BB != UI->getParent() ||
1411            (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1412         return false;
1413     }
1414     return true;
1415   } else
1416     return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1417            TargetTransformInfo::TCC_Free;
1418 }
1419 
1420 /// Return true if the only users of this instruction are outside of
1421 /// the loop. If this is true, we can sink the instruction to the exit
1422 /// blocks of the loop.
1423 ///
1424 /// We also return true if the instruction could be folded away in lowering.
1425 /// (e.g.,  a GEP can be folded into a load as an addressing mode in the loop).
1426 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1427                                   const LoopSafetyInfo *SafetyInfo,
1428                                   TargetTransformInfo *TTI, bool &FreeInLoop) {
1429   const auto &BlockColors = SafetyInfo->getBlockColors();
1430   bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1431   for (const User *U : I.users()) {
1432     const Instruction *UI = cast<Instruction>(U);
1433     if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1434       const BasicBlock *BB = PN->getParent();
1435       // We cannot sink uses in catchswitches.
1436       if (isa<CatchSwitchInst>(BB->getTerminator()))
1437         return false;
1438 
1439       // We need to sink a callsite to a unique funclet.  Avoid sinking if the
1440       // phi use is too muddled.
1441       if (isa<CallInst>(I))
1442         if (!BlockColors.empty() &&
1443             BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1444           return false;
1445     }
1446 
1447     if (CurLoop->contains(UI)) {
1448       if (IsFree) {
1449         FreeInLoop = true;
1450         continue;
1451       }
1452       return false;
1453     }
1454   }
1455   return true;
1456 }
1457 
1458 static Instruction *cloneInstructionInExitBlock(
1459     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1460     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1461   Instruction *New;
1462   if (auto *CI = dyn_cast<CallInst>(&I)) {
1463     const auto &BlockColors = SafetyInfo->getBlockColors();
1464 
1465     // Sinking call-sites need to be handled differently from other
1466     // instructions.  The cloned call-site needs a funclet bundle operand
1467     // appropriate for its location in the CFG.
1468     SmallVector<OperandBundleDef, 1> OpBundles;
1469     for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1470          BundleIdx != BundleEnd; ++BundleIdx) {
1471       OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1472       if (Bundle.getTagID() == LLVMContext::OB_funclet)
1473         continue;
1474 
1475       OpBundles.emplace_back(Bundle);
1476     }
1477 
1478     if (!BlockColors.empty()) {
1479       const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1480       assert(CV.size() == 1 && "non-unique color for exit block!");
1481       BasicBlock *BBColor = CV.front();
1482       Instruction *EHPad = BBColor->getFirstNonPHI();
1483       if (EHPad->isEHPad())
1484         OpBundles.emplace_back("funclet", EHPad);
1485     }
1486 
1487     New = CallInst::Create(CI, OpBundles);
1488   } else {
1489     New = I.clone();
1490   }
1491 
1492   ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1493   if (!I.getName().empty())
1494     New->setName(I.getName() + ".le");
1495 
1496   if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1497     // Create a new MemoryAccess and let MemorySSA set its defining access.
1498     MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1499         New, nullptr, New->getParent(), MemorySSA::Beginning);
1500     if (NewMemAcc) {
1501       if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1502         MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1503       else {
1504         auto *MemUse = cast<MemoryUse>(NewMemAcc);
1505         MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1506       }
1507     }
1508   }
1509 
1510   // Build LCSSA PHI nodes for any in-loop operands (if legal).  Note that
1511   // this is particularly cheap because we can rip off the PHI node that we're
1512   // replacing for the number and blocks of the predecessors.
1513   // OPT: If this shows up in a profile, we can instead finish sinking all
1514   // invariant instructions, and then walk their operands to re-establish
1515   // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1516   // sinking bottom-up.
1517   for (Use &Op : New->operands())
1518     if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1519       auto *OInst = cast<Instruction>(Op.get());
1520       PHINode *OpPN =
1521         PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1522                         OInst->getName() + ".lcssa", &ExitBlock.front());
1523       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1524         OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1525       Op = OpPN;
1526     }
1527   return New;
1528 }
1529 
1530 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1531                              AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1532   if (AST)
1533     AST->deleteValue(&I);
1534   if (MSSAU)
1535     MSSAU->removeMemoryAccess(&I);
1536   SafetyInfo.removeInstruction(&I);
1537   I.eraseFromParent();
1538 }
1539 
1540 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1541                                   ICFLoopSafetyInfo &SafetyInfo,
1542                                   MemorySSAUpdater *MSSAU,
1543                                   ScalarEvolution *SE) {
1544   SafetyInfo.removeInstruction(&I);
1545   SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1546   I.moveBefore(&Dest);
1547   if (MSSAU)
1548     if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1549             MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1550       MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1551                          MemorySSA::BeforeTerminator);
1552   if (SE)
1553     SE->forgetValue(&I);
1554 }
1555 
1556 static Instruction *sinkThroughTriviallyReplaceablePHI(
1557     PHINode *TPN, Instruction *I, LoopInfo *LI,
1558     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1559     const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1560     MemorySSAUpdater *MSSAU) {
1561   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1562          "Expect only trivially replaceable PHI");
1563   BasicBlock *ExitBlock = TPN->getParent();
1564   Instruction *New;
1565   auto It = SunkCopies.find(ExitBlock);
1566   if (It != SunkCopies.end())
1567     New = It->second;
1568   else
1569     New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1570         *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1571   return New;
1572 }
1573 
1574 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1575   BasicBlock *BB = PN->getParent();
1576   if (!BB->canSplitPredecessors())
1577     return false;
1578   // It's not impossible to split EHPad blocks, but if BlockColors already exist
1579   // it require updating BlockColors for all offspring blocks accordingly. By
1580   // skipping such corner case, we can make updating BlockColors after splitting
1581   // predecessor fairly simple.
1582   if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1583     return false;
1584   for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1585     BasicBlock *BBPred = *PI;
1586     if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1587         isa<CallBrInst>(BBPred->getTerminator()))
1588       return false;
1589   }
1590   return true;
1591 }
1592 
1593 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1594                                         LoopInfo *LI, const Loop *CurLoop,
1595                                         LoopSafetyInfo *SafetyInfo,
1596                                         MemorySSAUpdater *MSSAU) {
1597 #ifndef NDEBUG
1598   SmallVector<BasicBlock *, 32> ExitBlocks;
1599   CurLoop->getUniqueExitBlocks(ExitBlocks);
1600   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1601                                              ExitBlocks.end());
1602 #endif
1603   BasicBlock *ExitBB = PN->getParent();
1604   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1605 
1606   // Split predecessors of the loop exit to make instructions in the loop are
1607   // exposed to exit blocks through trivially replaceable PHIs while keeping the
1608   // loop in the canonical form where each predecessor of each exit block should
1609   // be contained within the loop. For example, this will convert the loop below
1610   // from
1611   //
1612   // LB1:
1613   //   %v1 =
1614   //   br %LE, %LB2
1615   // LB2:
1616   //   %v2 =
1617   //   br %LE, %LB1
1618   // LE:
1619   //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1620   //
1621   // to
1622   //
1623   // LB1:
1624   //   %v1 =
1625   //   br %LE.split, %LB2
1626   // LB2:
1627   //   %v2 =
1628   //   br %LE.split2, %LB1
1629   // LE.split:
1630   //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
1631   //   br %LE
1632   // LE.split2:
1633   //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
1634   //   br %LE
1635   // LE:
1636   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1637   //
1638   const auto &BlockColors = SafetyInfo->getBlockColors();
1639   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1640   while (!PredBBs.empty()) {
1641     BasicBlock *PredBB = *PredBBs.begin();
1642     assert(CurLoop->contains(PredBB) &&
1643            "Expect all predecessors are in the loop");
1644     if (PN->getBasicBlockIndex(PredBB) >= 0) {
1645       BasicBlock *NewPred = SplitBlockPredecessors(
1646           ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1647       // Since we do not allow splitting EH-block with BlockColors in
1648       // canSplitPredecessors(), we can simply assign predecessor's color to
1649       // the new block.
1650       if (!BlockColors.empty())
1651         // Grab a reference to the ColorVector to be inserted before getting the
1652         // reference to the vector we are copying because inserting the new
1653         // element in BlockColors might cause the map to be reallocated.
1654         SafetyInfo->copyColors(NewPred, PredBB);
1655     }
1656     PredBBs.remove(PredBB);
1657   }
1658 }
1659 
1660 /// When an instruction is found to only be used outside of the loop, this
1661 /// function moves it to the exit blocks and patches up SSA form as needed.
1662 /// This method is guaranteed to remove the original instruction from its
1663 /// position, and may either delete it or move it to outside of the loop.
1664 ///
1665 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1666                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
1667                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1668                  OptimizationRemarkEmitter *ORE) {
1669   bool Changed = false;
1670   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1671 
1672   // Iterate over users to be ready for actual sinking. Replace users via
1673   // unreachable blocks with undef and make all user PHIs trivially replaceable.
1674   SmallPtrSet<Instruction *, 8> VisitedUsers;
1675   for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1676     auto *User = cast<Instruction>(*UI);
1677     Use &U = UI.getUse();
1678     ++UI;
1679 
1680     if (VisitedUsers.count(User) || CurLoop->contains(User))
1681       continue;
1682 
1683     if (!DT->isReachableFromEntry(User->getParent())) {
1684       U = UndefValue::get(I.getType());
1685       Changed = true;
1686       continue;
1687     }
1688 
1689     // The user must be a PHI node.
1690     PHINode *PN = cast<PHINode>(User);
1691 
1692     // Surprisingly, instructions can be used outside of loops without any
1693     // exits.  This can only happen in PHI nodes if the incoming block is
1694     // unreachable.
1695     BasicBlock *BB = PN->getIncomingBlock(U);
1696     if (!DT->isReachableFromEntry(BB)) {
1697       U = UndefValue::get(I.getType());
1698       Changed = true;
1699       continue;
1700     }
1701 
1702     VisitedUsers.insert(PN);
1703     if (isTriviallyReplaceablePHI(*PN, I))
1704       continue;
1705 
1706     if (!canSplitPredecessors(PN, SafetyInfo))
1707       return Changed;
1708 
1709     // Split predecessors of the PHI so that we can make users trivially
1710     // replaceable.
1711     splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1712 
1713     // Should rebuild the iterators, as they may be invalidated by
1714     // splitPredecessorsOfLoopExit().
1715     UI = I.user_begin();
1716     UE = I.user_end();
1717   }
1718 
1719   if (VisitedUsers.empty())
1720     return Changed;
1721 
1722   ORE->emit([&]() {
1723     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1724            << "sinking " << ore::NV("Inst", &I);
1725   });
1726   if (isa<LoadInst>(I))
1727     ++NumMovedLoads;
1728   else if (isa<CallInst>(I))
1729     ++NumMovedCalls;
1730   ++NumSunk;
1731 
1732 #ifndef NDEBUG
1733   SmallVector<BasicBlock *, 32> ExitBlocks;
1734   CurLoop->getUniqueExitBlocks(ExitBlocks);
1735   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1736                                              ExitBlocks.end());
1737 #endif
1738 
1739   // Clones of this instruction. Don't create more than one per exit block!
1740   SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1741 
1742   // If this instruction is only used outside of the loop, then all users are
1743   // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1744   // the instruction.
1745   // First check if I is worth sinking for all uses. Sink only when it is worth
1746   // across all uses.
1747   SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1748   SmallVector<PHINode *, 8> ExitPNs;
1749   for (auto *UI : Users) {
1750     auto *User = cast<Instruction>(UI);
1751 
1752     if (CurLoop->contains(User))
1753       continue;
1754 
1755     PHINode *PN = cast<PHINode>(User);
1756     assert(ExitBlockSet.count(PN->getParent()) &&
1757            "The LCSSA PHI is not in an exit block!");
1758     if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1759       return Changed;
1760     }
1761 
1762     ExitPNs.push_back(PN);
1763   }
1764 
1765   for (auto *PN : ExitPNs) {
1766 
1767     // The PHI must be trivially replaceable.
1768     Instruction *New = sinkThroughTriviallyReplaceablePHI(
1769         PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1770     PN->replaceAllUsesWith(New);
1771     eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1772     Changed = true;
1773   }
1774   return Changed;
1775 }
1776 
1777 /// When an instruction is found to only use loop invariant operands that
1778 /// is safe to hoist, this instruction is called to do the dirty work.
1779 ///
1780 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1781                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1782                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1783                   OptimizationRemarkEmitter *ORE) {
1784   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1785                     << I << "\n");
1786   ORE->emit([&]() {
1787     return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1788                                                          << ore::NV("Inst", &I);
1789   });
1790 
1791   // Metadata can be dependent on conditions we are hoisting above.
1792   // Conservatively strip all metadata on the instruction unless we were
1793   // guaranteed to execute I if we entered the loop, in which case the metadata
1794   // is valid in the loop preheader.
1795   if (I.hasMetadataOtherThanDebugLoc() &&
1796       // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1797       // time in isGuaranteedToExecute if we don't actually have anything to
1798       // drop.  It is a compile time optimization, not required for correctness.
1799       !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1800     I.dropUnknownNonDebugMetadata();
1801 
1802   if (isa<PHINode>(I))
1803     // Move the new node to the end of the phi list in the destination block.
1804     moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1805   else
1806     // Move the new node to the destination block, before its terminator.
1807     moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1808 
1809   I.updateLocationAfterHoist();
1810 
1811   if (isa<LoadInst>(I))
1812     ++NumMovedLoads;
1813   else if (isa<CallInst>(I))
1814     ++NumMovedCalls;
1815   ++NumHoisted;
1816 }
1817 
1818 /// Only sink or hoist an instruction if it is not a trapping instruction,
1819 /// or if the instruction is known not to trap when moved to the preheader.
1820 /// or if it is a trapping instruction and is guaranteed to execute.
1821 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1822                                            const DominatorTree *DT,
1823                                            const TargetLibraryInfo *TLI,
1824                                            const Loop *CurLoop,
1825                                            const LoopSafetyInfo *SafetyInfo,
1826                                            OptimizationRemarkEmitter *ORE,
1827                                            const Instruction *CtxI) {
1828   if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
1829     return true;
1830 
1831   bool GuaranteedToExecute =
1832       SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1833 
1834   if (!GuaranteedToExecute) {
1835     auto *LI = dyn_cast<LoadInst>(&Inst);
1836     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1837       ORE->emit([&]() {
1838         return OptimizationRemarkMissed(
1839                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1840                << "failed to hoist load with loop-invariant address "
1841                   "because load is conditionally executed";
1842       });
1843   }
1844 
1845   return GuaranteedToExecute;
1846 }
1847 
1848 namespace {
1849 class LoopPromoter : public LoadAndStorePromoter {
1850   Value *SomePtr; // Designated pointer to store to.
1851   const SmallSetVector<Value *, 8> &PointerMustAliases;
1852   SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1853   SmallVectorImpl<Instruction *> &LoopInsertPts;
1854   SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1855   PredIteratorCache &PredCache;
1856   AliasSetTracker *AST;
1857   MemorySSAUpdater *MSSAU;
1858   LoopInfo &LI;
1859   DebugLoc DL;
1860   int Alignment;
1861   bool UnorderedAtomic;
1862   AAMDNodes AATags;
1863   ICFLoopSafetyInfo &SafetyInfo;
1864 
1865   // We're about to add a use of V in a loop exit block.  Insert an LCSSA phi
1866   // (if legal) if doing so would add an out-of-loop use to an instruction
1867   // defined in-loop.
1868   Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1869     if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1870       return V;
1871 
1872     Instruction *I = cast<Instruction>(V);
1873     // We need to create an LCSSA PHI node for the incoming value and
1874     // store that.
1875     PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1876                                   I->getName() + ".lcssa", &BB->front());
1877     for (BasicBlock *Pred : PredCache.get(BB))
1878       PN->addIncoming(I, Pred);
1879     return PN;
1880   }
1881 
1882 public:
1883   LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1884                const SmallSetVector<Value *, 8> &PMA,
1885                SmallVectorImpl<BasicBlock *> &LEB,
1886                SmallVectorImpl<Instruction *> &LIP,
1887                SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1888                AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1889                DebugLoc dl, int alignment, bool UnorderedAtomic,
1890                const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1891       : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1892         LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1893         PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1894         Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1895         SafetyInfo(SafetyInfo) {}
1896 
1897   bool isInstInList(Instruction *I,
1898                     const SmallVectorImpl<Instruction *> &) const override {
1899     Value *Ptr;
1900     if (LoadInst *LI = dyn_cast<LoadInst>(I))
1901       Ptr = LI->getOperand(0);
1902     else
1903       Ptr = cast<StoreInst>(I)->getPointerOperand();
1904     return PointerMustAliases.count(Ptr);
1905   }
1906 
1907   void doExtraRewritesBeforeFinalDeletion() override {
1908     // Insert stores after in the loop exit blocks.  Each exit block gets a
1909     // store of the live-out values that feed them.  Since we've already told
1910     // the SSA updater about the defs in the loop and the preheader
1911     // definition, it is all set and we can start using it.
1912     for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1913       BasicBlock *ExitBlock = LoopExitBlocks[i];
1914       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1915       LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1916       Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1917       Instruction *InsertPos = LoopInsertPts[i];
1918       StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1919       if (UnorderedAtomic)
1920         NewSI->setOrdering(AtomicOrdering::Unordered);
1921       NewSI->setAlignment(Align(Alignment));
1922       NewSI->setDebugLoc(DL);
1923       if (AATags)
1924         NewSI->setAAMetadata(AATags);
1925 
1926       if (MSSAU) {
1927         MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1928         MemoryAccess *NewMemAcc;
1929         if (!MSSAInsertPoint) {
1930           NewMemAcc = MSSAU->createMemoryAccessInBB(
1931               NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1932         } else {
1933           NewMemAcc =
1934               MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1935         }
1936         MSSAInsertPts[i] = NewMemAcc;
1937         MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1938         // FIXME: true for safety, false may still be correct.
1939       }
1940     }
1941   }
1942 
1943   void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1944     // Update alias analysis.
1945     if (AST)
1946       AST->copyValue(LI, V);
1947   }
1948   void instructionDeleted(Instruction *I) const override {
1949     SafetyInfo.removeInstruction(I);
1950     if (AST)
1951       AST->deleteValue(I);
1952     if (MSSAU)
1953       MSSAU->removeMemoryAccess(I);
1954   }
1955 };
1956 
1957 bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1958                                  DominatorTree *DT) {
1959   // We can perform the captured-before check against any instruction in the
1960   // loop header, as the loop header is reachable from any instruction inside
1961   // the loop.
1962   // TODO: ReturnCaptures=true shouldn't be necessary here.
1963   return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1964                                      /* StoreCaptures */ true,
1965                                      L->getHeader()->getTerminator(), DT);
1966 }
1967 
1968 /// Return true iff we can prove that a caller of this function can not inspect
1969 /// the contents of the provided object in a well defined program.
1970 bool isKnownNonEscaping(Value *Object, const Loop *L,
1971                         const TargetLibraryInfo *TLI, DominatorTree *DT) {
1972   if (isa<AllocaInst>(Object))
1973     // Since the alloca goes out of scope, we know the caller can't retain a
1974     // reference to it and be well defined.  Thus, we don't need to check for
1975     // capture.
1976     return true;
1977 
1978   // For all other objects we need to know that the caller can't possibly
1979   // have gotten a reference to the object.  There are two components of
1980   // that:
1981   //   1) Object can't be escaped by this function.  This is what
1982   //      PointerMayBeCaptured checks.
1983   //   2) Object can't have been captured at definition site.  For this, we
1984   //      need to know the return value is noalias.  At the moment, we use a
1985   //      weaker condition and handle only AllocLikeFunctions (which are
1986   //      known to be noalias).  TODO
1987   return isAllocLikeFn(Object, TLI) &&
1988          isNotCapturedBeforeOrInLoop(Object, L, DT);
1989 }
1990 
1991 } // namespace
1992 
1993 /// Try to promote memory values to scalars by sinking stores out of the
1994 /// loop and moving loads to before the loop.  We do this by looping over
1995 /// the stores in the loop, looking for stores to Must pointers which are
1996 /// loop invariant.
1997 ///
1998 bool llvm::promoteLoopAccessesToScalars(
1999     const SmallSetVector<Value *, 8> &PointerMustAliases,
2000     SmallVectorImpl<BasicBlock *> &ExitBlocks,
2001     SmallVectorImpl<Instruction *> &InsertPts,
2002     SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
2003     LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
2004     Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
2005     ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
2006   // Verify inputs.
2007   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
2008          SafetyInfo != nullptr &&
2009          "Unexpected Input to promoteLoopAccessesToScalars");
2010 
2011   Value *SomePtr = *PointerMustAliases.begin();
2012   BasicBlock *Preheader = CurLoop->getLoopPreheader();
2013 
2014   // It is not safe to promote a load/store from the loop if the load/store is
2015   // conditional.  For example, turning:
2016   //
2017   //    for () { if (c) *P += 1; }
2018   //
2019   // into:
2020   //
2021   //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
2022   //
2023   // is not safe, because *P may only be valid to access if 'c' is true.
2024   //
2025   // The safety property divides into two parts:
2026   // p1) The memory may not be dereferenceable on entry to the loop.  In this
2027   //    case, we can't insert the required load in the preheader.
2028   // p2) The memory model does not allow us to insert a store along any dynamic
2029   //    path which did not originally have one.
2030   //
2031   // If at least one store is guaranteed to execute, both properties are
2032   // satisfied, and promotion is legal.
2033   //
2034   // This, however, is not a necessary condition. Even if no store/load is
2035   // guaranteed to execute, we can still establish these properties.
2036   // We can establish (p1) by proving that hoisting the load into the preheader
2037   // is safe (i.e. proving dereferenceability on all paths through the loop). We
2038   // can use any access within the alias set to prove dereferenceability,
2039   // since they're all must alias.
2040   //
2041   // There are two ways establish (p2):
2042   // a) Prove the location is thread-local. In this case the memory model
2043   // requirement does not apply, and stores are safe to insert.
2044   // b) Prove a store dominates every exit block. In this case, if an exit
2045   // blocks is reached, the original dynamic path would have taken us through
2046   // the store, so inserting a store into the exit block is safe. Note that this
2047   // is different from the store being guaranteed to execute. For instance,
2048   // if an exception is thrown on the first iteration of the loop, the original
2049   // store is never executed, but the exit blocks are not executed either.
2050 
2051   bool DereferenceableInPH = false;
2052   bool SafeToInsertStore = false;
2053 
2054   SmallVector<Instruction *, 64> LoopUses;
2055 
2056   // We start with an alignment of one and try to find instructions that allow
2057   // us to prove better alignment.
2058   Align Alignment;
2059   // Keep track of which types of access we see
2060   bool SawUnorderedAtomic = false;
2061   bool SawNotAtomic = false;
2062   AAMDNodes AATags;
2063 
2064   const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2065 
2066   bool IsKnownThreadLocalObject = false;
2067   if (SafetyInfo->anyBlockMayThrow()) {
2068     // If a loop can throw, we have to insert a store along each unwind edge.
2069     // That said, we can't actually make the unwind edge explicit. Therefore,
2070     // we have to prove that the store is dead along the unwind edge.  We do
2071     // this by proving that the caller can't have a reference to the object
2072     // after return and thus can't possibly load from the object.
2073     Value *Object = getUnderlyingObject(SomePtr);
2074     if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
2075       return false;
2076     // Subtlety: Alloca's aren't visible to callers, but *are* potentially
2077     // visible to other threads if captured and used during their lifetimes.
2078     IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2079   }
2080 
2081   // Check that all of the pointers in the alias set have the same type.  We
2082   // cannot (yet) promote a memory location that is loaded and stored in
2083   // different sizes.  While we are at it, collect alignment and AA info.
2084   for (Value *ASIV : PointerMustAliases) {
2085     // Check that all of the pointers in the alias set have the same type.  We
2086     // cannot (yet) promote a memory location that is loaded and stored in
2087     // different sizes.
2088     if (SomePtr->getType() != ASIV->getType())
2089       return false;
2090 
2091     for (User *U : ASIV->users()) {
2092       // Ignore instructions that are outside the loop.
2093       Instruction *UI = dyn_cast<Instruction>(U);
2094       if (!UI || !CurLoop->contains(UI))
2095         continue;
2096 
2097       // If there is an non-load/store instruction in the loop, we can't promote
2098       // it.
2099       if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2100         if (!Load->isUnordered())
2101           return false;
2102 
2103         SawUnorderedAtomic |= Load->isAtomic();
2104         SawNotAtomic |= !Load->isAtomic();
2105 
2106         Align InstAlignment = Load->getAlign();
2107 
2108         // Note that proving a load safe to speculate requires proving
2109         // sufficient alignment at the target location.  Proving it guaranteed
2110         // to execute does as well.  Thus we can increase our guaranteed
2111         // alignment as well.
2112         if (!DereferenceableInPH || (InstAlignment > Alignment))
2113           if (isSafeToExecuteUnconditionally(*Load, DT, TLI, CurLoop,
2114                                              SafetyInfo, ORE,
2115                                              Preheader->getTerminator())) {
2116             DereferenceableInPH = true;
2117             Alignment = std::max(Alignment, InstAlignment);
2118           }
2119       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2120         // Stores *of* the pointer are not interesting, only stores *to* the
2121         // pointer.
2122         if (UI->getOperand(1) != ASIV)
2123           continue;
2124         if (!Store->isUnordered())
2125           return false;
2126 
2127         SawUnorderedAtomic |= Store->isAtomic();
2128         SawNotAtomic |= !Store->isAtomic();
2129 
2130         // If the store is guaranteed to execute, both properties are satisfied.
2131         // We may want to check if a store is guaranteed to execute even if we
2132         // already know that promotion is safe, since it may have higher
2133         // alignment than any other guaranteed stores, in which case we can
2134         // raise the alignment on the promoted store.
2135         Align InstAlignment = Store->getAlign();
2136 
2137         if (!DereferenceableInPH || !SafeToInsertStore ||
2138             (InstAlignment > Alignment)) {
2139           if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2140             DereferenceableInPH = true;
2141             SafeToInsertStore = true;
2142             Alignment = std::max(Alignment, InstAlignment);
2143           }
2144         }
2145 
2146         // If a store dominates all exit blocks, it is safe to sink.
2147         // As explained above, if an exit block was executed, a dominating
2148         // store must have been executed at least once, so we are not
2149         // introducing stores on paths that did not have them.
2150         // Note that this only looks at explicit exit blocks. If we ever
2151         // start sinking stores into unwind edges (see above), this will break.
2152         if (!SafeToInsertStore)
2153           SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2154             return DT->dominates(Store->getParent(), Exit);
2155           });
2156 
2157         // If the store is not guaranteed to execute, we may still get
2158         // deref info through it.
2159         if (!DereferenceableInPH) {
2160           DereferenceableInPH = isDereferenceableAndAlignedPointer(
2161               Store->getPointerOperand(), Store->getValueOperand()->getType(),
2162               Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
2163         }
2164       } else
2165         return false; // Not a load or store.
2166 
2167       // Merge the AA tags.
2168       if (LoopUses.empty()) {
2169         // On the first load/store, just take its AA tags.
2170         UI->getAAMetadata(AATags);
2171       } else if (AATags) {
2172         UI->getAAMetadata(AATags, /* Merge = */ true);
2173       }
2174 
2175       LoopUses.push_back(UI);
2176     }
2177   }
2178 
2179   // If we found both an unordered atomic instruction and a non-atomic memory
2180   // access, bail.  We can't blindly promote non-atomic to atomic since we
2181   // might not be able to lower the result.  We can't downgrade since that
2182   // would violate memory model.  Also, align 0 is an error for atomics.
2183   if (SawUnorderedAtomic && SawNotAtomic)
2184     return false;
2185 
2186   // If we're inserting an atomic load in the preheader, we must be able to
2187   // lower it.  We're only guaranteed to be able to lower naturally aligned
2188   // atomics.
2189   auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2190   if (SawUnorderedAtomic &&
2191       Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2192     return false;
2193 
2194   // If we couldn't prove we can hoist the load, bail.
2195   if (!DereferenceableInPH)
2196     return false;
2197 
2198   // We know we can hoist the load, but don't have a guaranteed store.
2199   // Check whether the location is thread-local. If it is, then we can insert
2200   // stores along paths which originally didn't have them without violating the
2201   // memory model.
2202   if (!SafeToInsertStore) {
2203     if (IsKnownThreadLocalObject)
2204       SafeToInsertStore = true;
2205     else {
2206       Value *Object = getUnderlyingObject(SomePtr);
2207       SafeToInsertStore =
2208           (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2209           isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
2210     }
2211   }
2212 
2213   // If we've still failed to prove we can sink the store, give up.
2214   if (!SafeToInsertStore)
2215     return false;
2216 
2217   // Otherwise, this is safe to promote, lets do it!
2218   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2219                     << '\n');
2220   ORE->emit([&]() {
2221     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2222                               LoopUses[0])
2223            << "Moving accesses to memory location out of the loop";
2224   });
2225   ++NumPromoted;
2226 
2227   // Look at all the loop uses, and try to merge their locations.
2228   std::vector<const DILocation *> LoopUsesLocs;
2229   for (auto U : LoopUses)
2230     LoopUsesLocs.push_back(U->getDebugLoc().get());
2231   auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2232 
2233   // We use the SSAUpdater interface to insert phi nodes as required.
2234   SmallVector<PHINode *, 16> NewPHIs;
2235   SSAUpdater SSA(&NewPHIs);
2236   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2237                         InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL,
2238                         Alignment.value(), SawUnorderedAtomic, AATags,
2239                         *SafetyInfo);
2240 
2241   // Set up the preheader to have a definition of the value.  It is the live-out
2242   // value from the preheader that uses in the loop will use.
2243   LoadInst *PreheaderLoad = new LoadInst(
2244       SomePtr->getType()->getPointerElementType(), SomePtr,
2245       SomePtr->getName() + ".promoted", Preheader->getTerminator());
2246   if (SawUnorderedAtomic)
2247     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2248   PreheaderLoad->setAlignment(Alignment);
2249   PreheaderLoad->setDebugLoc(DebugLoc());
2250   if (AATags)
2251     PreheaderLoad->setAAMetadata(AATags);
2252   SSA.AddAvailableValue(Preheader, PreheaderLoad);
2253 
2254   if (MSSAU) {
2255     MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2256         PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2257     MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2258     MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2259   }
2260 
2261   if (MSSAU && VerifyMemorySSA)
2262     MSSAU->getMemorySSA()->verifyMemorySSA();
2263   // Rewrite all the loads in the loop and remember all the definitions from
2264   // stores in the loop.
2265   Promoter.run(LoopUses);
2266 
2267   if (MSSAU && VerifyMemorySSA)
2268     MSSAU->getMemorySSA()->verifyMemorySSA();
2269   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2270   if (PreheaderLoad->use_empty())
2271     eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2272 
2273   return true;
2274 }
2275 
2276 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2277                                 function_ref<void(Instruction *)> Fn) {
2278   for (const BasicBlock *BB : L->blocks())
2279     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2280       for (const auto &Access : *Accesses)
2281         if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2282           Fn(MUD->getMemoryInst());
2283 }
2284 
2285 static SmallVector<SmallSetVector<Value *, 8>, 0>
2286 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L,
2287                            SmallVectorImpl<Instruction *> &MaybePromotable) {
2288   AliasSetTracker AST(*AA);
2289 
2290   auto IsPotentiallyPromotable = [L](const Instruction *I) {
2291     if (const auto *SI = dyn_cast<StoreInst>(I))
2292       return L->isLoopInvariant(SI->getPointerOperand());
2293     if (const auto *LI = dyn_cast<LoadInst>(I))
2294       return L->isLoopInvariant(LI->getPointerOperand());
2295     return false;
2296   };
2297 
2298   // Populate AST with potentially promotable accesses and remove them from
2299   // MaybePromotable, so they will not be checked again on the next iteration.
2300   SmallPtrSet<Value *, 16> AttemptingPromotion;
2301   llvm::erase_if(MaybePromotable, [&](Instruction *I) {
2302     if (IsPotentiallyPromotable(I)) {
2303       AttemptingPromotion.insert(I);
2304       AST.add(I);
2305       return true;
2306     }
2307     return false;
2308   });
2309 
2310   // We're only interested in must-alias sets that contain a mod.
2311   SmallVector<const AliasSet *, 8> Sets;
2312   for (AliasSet &AS : AST)
2313     if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2314       Sets.push_back(&AS);
2315 
2316   if (Sets.empty())
2317     return {}; // Nothing to promote...
2318 
2319   // Discard any sets for which there is an aliasing non-promotable access.
2320   foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2321     if (AttemptingPromotion.contains(I))
2322       return;
2323 
2324     llvm::erase_if(Sets, [&](const AliasSet *AS) {
2325       return AS->aliasesUnknownInst(I, *AA);
2326     });
2327   });
2328 
2329   SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2330   for (const AliasSet *Set : Sets) {
2331     SmallSetVector<Value *, 8> PointerMustAliases;
2332     for (const auto &ASI : *Set)
2333       PointerMustAliases.insert(ASI.getValue());
2334     Result.push_back(std::move(PointerMustAliases));
2335   }
2336 
2337   return Result;
2338 }
2339 
2340 /// Returns an owning pointer to an alias set which incorporates aliasing info
2341 /// from L and all subloops of L.
2342 std::unique_ptr<AliasSetTracker>
2343 LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2344                                                  AAResults *AA) {
2345   auto CurAST = std::make_unique<AliasSetTracker>(*AA);
2346 
2347   // Add everything from all the sub loops.
2348   for (Loop *InnerL : L->getSubLoops())
2349     for (BasicBlock *BB : InnerL->blocks())
2350       CurAST->add(*BB);
2351 
2352   // And merge in this loop (without anything from inner loops).
2353   for (BasicBlock *BB : L->blocks())
2354     if (LI->getLoopFor(BB) == L)
2355       CurAST->add(*BB);
2356 
2357   return CurAST;
2358 }
2359 
2360 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2361                                      AliasSetTracker *CurAST, Loop *CurLoop,
2362                                      AAResults *AA) {
2363   // First check to see if any of the basic blocks in CurLoop invalidate *V.
2364   bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2365 
2366   if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2367     return isInvalidatedAccordingToAST;
2368 
2369   // Check with a diagnostic analysis if we can refine the information above.
2370   // This is to identify the limitations of using the AST.
2371   // The alias set mechanism used by LICM has a major weakness in that it
2372   // combines all things which may alias into a single set *before* asking
2373   // modref questions. As a result, a single readonly call within a loop will
2374   // collapse all loads and stores into a single alias set and report
2375   // invalidation if the loop contains any store. For example, readonly calls
2376   // with deopt states have this form and create a general alias set with all
2377   // loads and stores.  In order to get any LICM in loops containing possible
2378   // deopt states we need a more precise invalidation of checking the mod ref
2379   // info of each instruction within the loop and LI. This has a complexity of
2380   // O(N^2), so currently, it is used only as a diagnostic tool since the
2381   // default value of LICMN2Threshold is zero.
2382 
2383   // Don't look at nested loops.
2384   if (CurLoop->begin() != CurLoop->end())
2385     return true;
2386 
2387   int N = 0;
2388   for (BasicBlock *BB : CurLoop->getBlocks())
2389     for (Instruction &I : *BB) {
2390       if (N >= LICMN2Theshold) {
2391         LLVM_DEBUG(dbgs() << "Alasing N2 threshold exhausted for "
2392                           << *(MemLoc.Ptr) << "\n");
2393         return true;
2394       }
2395       N++;
2396       auto Res = AA->getModRefInfo(&I, MemLoc);
2397       if (isModSet(Res)) {
2398         LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
2399                           << *(MemLoc.Ptr) << "\n");
2400         return true;
2401       }
2402     }
2403   LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2404   return false;
2405 }
2406 
2407 bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2408                                       Loop *CurLoop, Instruction &I,
2409                                       SinkAndHoistLICMFlags &Flags) {
2410   // For hoisting, use the walker to determine safety
2411   if (!Flags.getIsSink()) {
2412     MemoryAccess *Source;
2413     // See declaration of SetLicmMssaOptCap for usage details.
2414     if (Flags.tooManyClobberingCalls())
2415       Source = MU->getDefiningAccess();
2416     else {
2417       Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2418       Flags.incrementClobberingCalls();
2419     }
2420     return !MSSA->isLiveOnEntryDef(Source) &&
2421            CurLoop->contains(Source->getBlock());
2422   }
2423 
2424   // For sinking, we'd need to check all Defs below this use. The getClobbering
2425   // call will look on the backedge of the loop, but will check aliasing with
2426   // the instructions on the previous iteration.
2427   // For example:
2428   // for (i ... )
2429   //   load a[i] ( Use (LoE)
2430   //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2431   //   i++;
2432   // The load sees no clobbering inside the loop, as the backedge alias check
2433   // does phi translation, and will check aliasing against store a[i-1].
2434   // However sinking the load outside the loop, below the store is incorrect.
2435 
2436   // For now, only sink if there are no Defs in the loop, and the existing ones
2437   // precede the use and are in the same block.
2438   // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2439   // needs PostDominatorTreeAnalysis.
2440   // FIXME: More precise: no Defs that alias this Use.
2441   if (Flags.tooManyMemoryAccesses())
2442     return true;
2443   for (auto *BB : CurLoop->getBlocks())
2444     if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2445       return true;
2446   // When sinking, the source block may not be part of the loop so check it.
2447   if (!CurLoop->contains(&I))
2448     return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2449 
2450   return false;
2451 }
2452 
2453 bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2454                                        MemoryUse &MU) {
2455   if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2456     for (const auto &MA : *Accesses)
2457       if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2458         if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2459           return true;
2460   return false;
2461 }
2462 
2463 /// Little predicate that returns true if the specified basic block is in
2464 /// a subloop of the current one, not the current one itself.
2465 ///
2466 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2467   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2468   return LI->getLoopFor(BB) != CurLoop;
2469 }
2470