//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form.  In cases where this kicks in, it can be a significant
// performance win.
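//
// For example, a simple zeroing loop (illustrative C, not from a test case)
// such as
//
//   for (i = 0; i != n; ++i) p[i] = 0;
//
// is rewritten into a single call to memset(p, 0, n).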
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set.  It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);
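
// When debugging, the heuristic can be toggled from the command line, e.g.
// with the legacy pass manager:
//   opt -loop-idiom -use-lir-code-size-heurs=false <input.ll>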

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  OptimizationRemarkEmitter &ORE;
  bool ApplyCodeSizeHeuristics;
  std::unique_ptr<MemorySSAUpdater> MSSAU;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI, MemorySSA *MSSA,
                              const DataLayout *DL,
                              OptimizationRemarkEmitter &ORE)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {
    if (MSSA)
      MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
  }

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertFFS();  /// Find First Set: ctlz or cttz
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent());
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
    auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    MemorySSA *MSSA = nullptr;
    if (MSSAAnalysis)
      MSSA = &MSSAAnalysis->getMSSA();

    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass.  Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass.  Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
                         AR.MSSA, DL, ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
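  // An instruction must have no remaining uses before it can be erased, so
  // point any that are left at undef first.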
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return false;

  bool MadeChange = false;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in.  Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
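///
/// For example (illustrative, assuming a little-endian target), a strided
/// store of the i32 constant 0x01020304 yields a ConstantArray of four such
/// i32s, i.e. the 16-byte pattern 04 03 02 01 repeated four times.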
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array.  We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Don't convert stores of non-integral pointer types to memsets (which store
  // integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored.  A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  // Note: memset and memset_pattern on unordered-atomic stores are not yet
  // supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant.  If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  } else if (!UnorderedAtomic && HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store.  If so, then we
    // know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the current
    // loop, which indicates a strided load.  If we have something else, it's a
    // random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success.  This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count.  This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop.  For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop.  Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern).  The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// See if the given stores can be promoted to a memset.
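///
/// A typical target is a hand-unrolled loop (illustrative C, not from a test
/// case) whose two adjacent stores are merged into one memset:
/// \code
///    for (i = 0; i != n; i += 2) {
///      p[i] = 0;
///      p[i + 1] = 0;
///    }
/// \endcode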
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // Pairing with the immediately succeeding or preceding candidate usually
    // creates the best chance of finding a memset opportunity.
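    // For example (illustrative), with i == 2 and e == 5 the queue is visited
    // in the order 3, 4, 1, 0.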
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores.  If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize,
                                MaybeAlign(HeadStore->getAlignment()),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset.  If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant.  If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(
      Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()),
      SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access.  The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
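///
/// For example, the memset transform below passes ModRefInfo::ModRef (no other
/// instruction may read or write the region), while the source buffer of a
/// memcpy only needs ModRefInfo::Mod (concurrent reads of it are harmless).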
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop.  Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::unknown();

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
                                       StoreSize);

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store.  A store to &A[i] of 100 will always return may-alias
  // with a store to &A[100]; we need StoreLoc to be "A" with a size of 100,
  // which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset.  Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
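//
// For example (illustrative), a loop storing 4 bytes per iteration with
// stride -4 and BECount == 15 writes [Start - 60, Start + 4), so the base
// pointer becomes Start - 15*4.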
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
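///
/// For example (illustrative), with BECount == n - 1 and StoreSize == 4 the
/// result folds to (BECount + 1) * 4 == 4 * n bytes.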
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}

/// processLoopStridedStore - We see a strided store of some value.  If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header.  This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");
  SCEVExpanderCleaner ExpCleaner(Expander, *DT);

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  bool Changed = false;
  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return Changed;

  // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
  // this into a memset in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location.  Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return Changed;

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return Changed;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return Changed;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   MaybeAlign(StoreAlignment));
  } else {
    // Everything is emitted in default address space
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16.  PatternValue is known to be
    // a constant array of 16 bytes.  Plop the value into a mergeable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(Align(16));
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore",
                              NewCall->getDebugLoc(), Preheader)
           << "Transformed loop-strided store into a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memset has been formed.  Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores) {
    if (MSSAU)
      MSSAU->removeMemoryAccess(I, true);
    deleteDeadInstruction(I);
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemSet;
  ExpCleaner.markResultUsed();
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy.  This kicks in for stuff like
/// for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load.  If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header.  This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  SCEVExpanderCleaner ExpCleaner(Expander, *DT);

  bool Changed = false;
  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
  // this into a memcpy in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to.  This includes the load that
  // feeds the stores.  Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return Changed;

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return Changed;

  if (avoidLIRForMultiBlockLoop())
    return Changed;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store is atomic, then it must necessarily be unordered
  //  by the previous checks.
  if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
                                   LI->getAlign(), NumBytes);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    const Align StoreAlign = SI->getAlign();
    const Align LoadAlign = LI->getAlign();
    if (StoreAlign < StoreSize || LoadAlign < StoreSize)
      return Changed;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size, then
    // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return Changed;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
        StoreSize);
  }
  NewCall->setDebugLoc(SI->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *SI
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memcpy has been formed.  Zap the original store and anything that
  // feeds into it.
  if (MSSAU)
    MSSAU->removeMemoryAccess(SI, true);
  deleteDeadInstruction(SI);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemCpy;
  ExpCleaner.markResultUsed();
  return true;
}

// When compiling for codesize we avoid idiom recognition for a multi-block loop
// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
//
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Noncountable Loop %"
                    << CurLoop->getHeader()->getName() << "\n");

  return recognizePopcount() || recognizeAndInsertFFS();
}

/// Check if the given conditional branch is based on a comparison between
/// a variable and zero, and if the variable is non-zero (or zero, when
/// JmpOnZero is true), control branches to the loop entry. If the branch
/// matches this behavior, the variable involved in the comparison is returned.
/// This function is called to check whether the precondition and postcondition
/// of the loop are in the desirable form.
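///
/// A matching loop-back branch looks like this (illustrative IR):
/// \code
///   %cmp = icmp ne i32 %x, 0
///   br i1 %cmp, label %loop-entry, label %loop-exit
/// \endcode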
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
                             bool JmpOnZero = false) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  BasicBlock *TrueSucc = BI->getSuccessor(0);
  BasicBlock *FalseSucc = BI->getSuccessor(1);
  if (JmpOnZero)
    std::swap(TrueSucc, FalseSucc);

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

// Check if the recurrence variable `VarX` is in the right form to create
// the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
                                 BasicBlock *LoopEntry) {
  auto *PhiX = dyn_cast<PHINode>(VarX);
  if (PhiX && PhiX->getParent() == LoopEntry &&
      (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
    return PhiX;
  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 != 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  //    "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
      return false;

    ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
    if (!Dec ||
        !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubOneOp->getOpcode() == Instruction::Add &&
           Dec->isMinusOne()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      if (!Phi)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the leading zeros (CTLZ),
///       or nullptr if there is none.
/// 2) \p CntPhi is set to the corresponding phi node,
///       or nullptr if there is none.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating the loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);   // PhiX
///       cnt = phi(cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;   // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
/// \endcode
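///
/// In C terms (a sketch; names are illustrative), the right-shift case is:
/// \code
///    cnt = init-val;
///    if (x != 0)
///      do { cnt++; x >>= 1; } while (x != 0);
/// \endcode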
static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
                                      Intrinsic::ID &IntrinID, Value *&InitX,
                                      Instruction *&CntInst, PHINode *&CntPhi,
                                      Instruction *&DefX) {
  BasicBlock *LoopEntry;
  Value *VarX = nullptr;

  DefX = nullptr;
  CntInst = nullptr;
  CntPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  if (Value *T = matchCondition(
          dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    DefX = dyn_cast<Instruction>(T);
  else
    return false;

  // step 2: detect instructions corresponding to "x.next = x >> 1" or
  //         "x.next = x << 1"
  if (!DefX || !DefX->isShift())
    return false;
  IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
                                                     Intrinsic::ctlz;
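  // For a left shift the lowest set bit is the last one to be shifted out,
  // so the trip count is governed by the trailing zeros (cttz); for right
  // shifts it is governed by the leading zeros (ctlz).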
  ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
  if (!Shft || !Shft->isOne())
    return false;
  VarX = DefX->getOperand(0);

  // step 3: Check the recurrence of variable X
  PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
  if (!PhiX)
    return false;

  InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());

  // Make sure the initial value can't be negative; otherwise the ashr in the
  // loop might never reach zero, which would make the loop infinite.
  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
    return false;

  // step 4: Find the instruction incrementing the count: cnt.next = cnt + 1
  // TODO: We can skip this step. If the loop trip count is known (via CTLZ),
  //       then all uses of "cnt.next" could be optimized to the trip count
  //       plus "cnt0". Currently this is not done.
  //       This step could also be used to detect the POPCNT idiom:
  //       cnt.next = cnt + (x.next & 1)
  for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                            IterE = LoopEntry->end();
       Iter != IterE; Iter++) {
    Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}

/// Recognize a CTLZ or CTTZ idiom in a non-countable loop and convert the
/// loop into a countable one (with a CTLZ / CTTZ based trip count). Returns
/// true if the CTLZ / CTTZ intrinsic was inserted to form the new trip
/// count; otherwise returns false.
bool LoopIdiomRecognize::recognizeAndInsertFFS() {
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  Intrinsic::ID IntrinID;
  Value *InitX;
  Instruction *DefX = nullptr;
  PHINode *CntPhi = nullptr;
  Instruction *CntInst = nullptr;
  // Helps decide whether the transformation is profitable. For the
  // ShiftUntilZero idiom, this is always 6.
  size_t IdiomCanonicalSize = 6;

  if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
                                 CntInst, CntPhi, DefX))
    return false;

  bool IsCntPhiUsedOutsideLoop = false;
  for (User *U : CntPhi->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntPhiUsedOutsideLoop = true;
      break;
    }
  bool IsCntInstUsedOutsideLoop = false;
  for (User *U : CntInst->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntInstUsedOutsideLoop = true;
      break;
    }
  // If both CntInst and CntPhi are used outside the loop, the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;

  // On some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
  // 0. If we cannot guarantee X != 0, we need to emit an explicit zero check
  // when expanding the intrinsic.
  bool ZeroCheck = false;
  // It is safe to assume the preheader exists, as that was checked in the
  // caller, runOnLoop.
  BasicBlock *PH = CurLoop->getLoopPreheader();

  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means inputs of
  // 0 and 1 produce the same count in the original loop, so the CTLZ/CTTZ
  // based count is only valid if the precondition rules out a zero input.
  if (!IsCntPhiUsedOutsideLoop) {
    auto *PreCondBB = PH->getSinglePredecessor();
    if (!PreCondBB)
      return false;
    auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    if (!PreCondBI)
      return false;
    if (matchCondition(PreCondBI, PH) != InitX)
      return false;
    ZeroCheck = true;
  }

  // Check if the CTLZ / CTTZ intrinsic is profitable. Assume it is always
  // profitable if we can delete the loop.

  // The canonical form of the loop has only 6 instructions:
  //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
  //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
  //  %shr = ashr %n.addr.0, 1
  //  %tobool = icmp eq %shr, 0
  //  %inc = add nsw %i.0, 1
  //  br i1 %tobool

  const Value *Args[] = {
      InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                       : ConstantInt::getFalse(InitX->getContext())};

  // @llvm.dbg intrinsics don't count, as they have no semantic effect.
  auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
  uint32_t HeaderSize =
      std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());

  IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
  int Cost =
    TTI->getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency);
  if (HeaderSize != IdiomCanonicalSize &&
      Cost > TargetTransformInfo::TCC_Basic)
    return false;

  transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
                           DefX->getDebugLoc(), ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop, so recognizing the popcount idiom only makes
  // sense in a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

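/// Insert a call to the @llvm.ctpop intrinsic for \p Val at the builder's
/// current insertion point, e.g. "%c = call i32 @llvm.ctpop.i32(i32 %x)"
/// (the name %c is illustrative).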
static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

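/// Insert a call to the @llvm.ctlz or @llvm.cttz intrinsic \p IID for
/// \p Val, e.g. "%c = call i32 @llvm.ctlz.i32(i32 %x, i1 true)" (the name
/// %c is illustrative). The i1 argument states whether a zero input yields
/// an undefined result; passing true is only safe when \p ZeroCheck reflects
/// a guarantee that the input is non-zero.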
static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                    const DebugLoc &DL, bool ZeroCheck,
                                    Intrinsic::ID IID) {
  Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

/// Transform the following loop (using CTLZ; CTTZ is similar):
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
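///
/// A worked example (i32, lshr, Cnt0 = 0; the numbers are illustrative):
/// for InitX = 8, Count = 32 - CTLZ(8) = 4 matches the final value of
/// CntInst in the original loop, and CountPrev = 32 - CTLZ(8 >> 1) = 3
/// matches the final value of CntPhi.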
void LoopIdiomRecognize::transformLoopToCountable(
    Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
    PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
    bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext;

  //   Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi, additionally create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::Shl) // cttz
      InitXNext =
          Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
  Count = Builder.CreateSub(
      ConstantInt::get(FFS->getType(),
                       FFS->getType()->getIntegerBitWidth()),
      FFS);
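  // E.g. (i32, shl/cttz case; numbers are illustrative): InitX = 0b100 gives
  // CTTZ = 2 and Count = 32 - 2 = 30, the number of left shifts needed to
  // reach zero.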
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(
        CountPrev,
        ConstantInt::get(CountPrev->getType(), 1));
  }

  NewCount = Builder.CreateZExtOrTrunc(
                      IsCntPhiUsedOutsideLoop ? CountPrev : Count,
                      cast<IntegerType>(CntInst->getType()));

  // If the counter's initial value is not zero, insert an Add instruction.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert new IV and loop condition:
  // loop:
  //   ...
  //   PhiCount = PHI [Count, Dec]
  //   ...
  //   Dec = PhiCount - 1
  //   ...
  //   Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                        "tcdec", false, true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));

  // Step 3: All the references to the original counter outside
  //  the loop are replaced with NewCount.
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 4: Forget the "non-computable" trip-count SCEV associated with the
  //   loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Before the transformation, the loop is assumed to look like:
  //  if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while(x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Rewrite the precondition "if (x == 0) goto loop-exit" to
  //   "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
  //   call would be partially dead code, and downstream passes would drag
  //   it back from the precondition block to the preheader.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a
  // noncountable loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite
  //    loop isn't dead even if it computes nothing useful. In general, DCE
  //    needs to prove a noncountable loop finite before it can safely be
  //    deleted.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by optimizations that are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x-1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  //  the loop are replaced with NewCount -- the value returned from
  //  __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  //   loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}