1 //===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass implements an idiom recognizer that transforms simple loops into a
11 // non-loop form.  In cases that this kicks in, it can be a significant
12 // performance win.
13 //
14 // If compiling for code size we avoid idiom recognition if the resulting
15 // code could be larger than the code for the original loop. One way this could
16 // happen is if the loop is not removable after idiom recognition due to the
17 // presence of non-idiom instructions. The initial implementation of the
18 // heuristics applies to idioms in multi-block loops.
19 //
20 //===----------------------------------------------------------------------===//
21 //
22 // TODO List:
23 //
24 // Future loop memory idioms to recognize:
25 //   memcmp, memmove, strlen, etc.
26 // Future floating point idioms to recognize in -ffast-math mode:
27 //   fpowi
28 // Future integer operation idioms to recognize:
29 //   ctpop, ctlz, cttz
30 //
31 // Beware that isel's default lowering for ctpop is highly inefficient for
32 // i64 and larger types when i64 is legal and the value has few bits set.  It
33 // would be good to enhance isel to emit a loop for ctpop in this case.
34 //
35 // This could recognize common matrix multiplies and dot product idioms and
36 // replace them with calls to BLAS (if linked in??).
37 //
38 //===----------------------------------------------------------------------===//
39 
40 #include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
41 #include "llvm/ADT/APInt.h"
42 #include "llvm/ADT/ArrayRef.h"
43 #include "llvm/ADT/DenseMap.h"
44 #include "llvm/ADT/MapVector.h"
45 #include "llvm/ADT/SetVector.h"
46 #include "llvm/ADT/SmallPtrSet.h"
47 #include "llvm/ADT/SmallVector.h"
48 #include "llvm/ADT/Statistic.h"
49 #include "llvm/ADT/StringRef.h"
50 #include "llvm/Analysis/AliasAnalysis.h"
51 #include "llvm/Analysis/LoopAccessAnalysis.h"
52 #include "llvm/Analysis/LoopInfo.h"
53 #include "llvm/Analysis/LoopPass.h"
54 #include "llvm/Analysis/MemoryLocation.h"
55 #include "llvm/Analysis/ScalarEvolution.h"
56 #include "llvm/Analysis/ScalarEvolutionExpander.h"
57 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
58 #include "llvm/Analysis/TargetLibraryInfo.h"
59 #include "llvm/Analysis/TargetTransformInfo.h"
60 #include "llvm/Analysis/ValueTracking.h"
61 #include "llvm/IR/Attributes.h"
62 #include "llvm/IR/BasicBlock.h"
63 #include "llvm/IR/Constant.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DataLayout.h"
66 #include "llvm/IR/DebugLoc.h"
67 #include "llvm/IR/DerivedTypes.h"
68 #include "llvm/IR/Dominators.h"
69 #include "llvm/IR/GlobalValue.h"
70 #include "llvm/IR/GlobalVariable.h"
71 #include "llvm/IR/IRBuilder.h"
72 #include "llvm/IR/InstrTypes.h"
73 #include "llvm/IR/Instruction.h"
74 #include "llvm/IR/Instructions.h"
75 #include "llvm/IR/IntrinsicInst.h"
76 #include "llvm/IR/Intrinsics.h"
77 #include "llvm/IR/LLVMContext.h"
78 #include "llvm/IR/Module.h"
79 #include "llvm/IR/PassManager.h"
80 #include "llvm/IR/Type.h"
81 #include "llvm/IR/User.h"
82 #include "llvm/IR/Value.h"
83 #include "llvm/IR/ValueHandle.h"
84 #include "llvm/Pass.h"
85 #include "llvm/Support/Casting.h"
86 #include "llvm/Support/CommandLine.h"
87 #include "llvm/Support/Debug.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/Transforms/Scalar.h"
90 #include "llvm/Transforms/Utils/BuildLibCalls.h"
91 #include "llvm/Transforms/Utils/Local.h"
92 #include "llvm/Transforms/Utils/LoopUtils.h"
93 #include <algorithm>
94 #include <cassert>
95 #include <cstdint>
96 #include <utility>
97 #include <vector>
98 
99 using namespace llvm;
100 
101 #define DEBUG_TYPE "loop-idiom"
102 
103 STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
104 STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
105 
106 static cl::opt<bool> UseLIRCodeSizeHeurs(
107     "use-lir-code-size-heurs",
108     cl::desc("Use loop idiom recognition code size heuristics when compiling"
109              "with -Os/-Oz"),
110     cl::init(true), cl::Hidden);
111 
112 namespace {
113 
114 class LoopIdiomRecognize {
115   Loop *CurLoop = nullptr;
116   AliasAnalysis *AA;
117   DominatorTree *DT;
118   LoopInfo *LI;
119   ScalarEvolution *SE;
120   TargetLibraryInfo *TLI;
121   const TargetTransformInfo *TTI;
122   const DataLayout *DL;
123   bool ApplyCodeSizeHeuristics;
124 
125 public:
126   explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
127                               LoopInfo *LI, ScalarEvolution *SE,
128                               TargetLibraryInfo *TLI,
129                               const TargetTransformInfo *TTI,
130                               const DataLayout *DL)
131       : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}
132 
133   bool runOnLoop(Loop *L);
134 
135 private:
136   using StoreList = SmallVector<StoreInst *, 8>;
137   using StoreListMap = MapVector<Value *, StoreList>;
138 
139   StoreListMap StoreRefsForMemset;
140   StoreListMap StoreRefsForMemsetPattern;
141   StoreList StoreRefsForMemcpy;
142   bool HasMemset;
143   bool HasMemsetPattern;
144   bool HasMemcpy;
145 
146   /// Return code for isLegalStore()
147   enum LegalStoreKind {
148     None = 0,
149     Memset,
150     MemsetPattern,
151     Memcpy,
152     UnorderedAtomicMemcpy,
153     DontUse // Dummy retval never to be used. Allows catching errors in retval
154             // handling.
155   };
156 
157   /// \name Countable Loop Idiom Handling
158   /// @{
159 
160   bool runOnCountableLoop();
161   bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
162                       SmallVectorImpl<BasicBlock *> &ExitBlocks);
163 
164   void collectStores(BasicBlock *BB);
165   LegalStoreKind isLegalStore(StoreInst *SI);
166   bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
167                          bool ForMemset);
168   bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
169 
170   bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
171                                unsigned StoreAlignment, Value *StoredVal,
172                                Instruction *TheStore,
173                                SmallPtrSetImpl<Instruction *> &Stores,
174                                const SCEVAddRecExpr *Ev, const SCEV *BECount,
175                                bool NegStride, bool IsLoopMemset = false);
176   bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
177   bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
178                                  bool IsLoopMemset = false);
179 
180   /// @}
181   /// \name Noncountable Loop Idiom Handling
182   /// @{
183 
184   bool runOnNoncountableLoop();
185 
186   bool recognizePopcount();
187   void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
188                                PHINode *CntPhi, Value *Var);
189   bool recognizeAndInsertCTLZ();
190   void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
191                                 PHINode *CntPhi, Value *Var, const DebugLoc DL,
192                                 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop);
193 
194   /// @}
195 };
196 
197 class LoopIdiomRecognizeLegacyPass : public LoopPass {
198 public:
199   static char ID;
200 
201   explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
202     initializeLoopIdiomRecognizeLegacyPassPass(
203         *PassRegistry::getPassRegistry());
204   }
205 
206   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
207     if (skipLoop(L))
208       return false;
209 
210     AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
211     DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
212     LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
213     ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
214     TargetLibraryInfo *TLI =
215         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
216     const TargetTransformInfo *TTI =
217         &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
218             *L->getHeader()->getParent());
219     const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
220 
221     LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
222     return LIR.runOnLoop(L);
223   }
224 
225   /// This transformation requires natural loop information & requires that
226   /// loop preheaders be inserted into the CFG.
227   void getAnalysisUsage(AnalysisUsage &AU) const override {
228     AU.addRequired<TargetLibraryInfoWrapperPass>();
229     AU.addRequired<TargetTransformInfoWrapperPass>();
230     getLoopAnalysisUsage(AU);
231   }
232 };
233 
234 } // end anonymous namespace
235 
236 char LoopIdiomRecognizeLegacyPass::ID = 0;
237 
238 PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
239                                               LoopStandardAnalysisResults &AR,
240                                               LPMUpdater &) {
241   const auto *DL = &L.getHeader()->getModule()->getDataLayout();
242 
243   LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
244   if (!LIR.runOnLoop(&L))
245     return PreservedAnalyses::all();
246 
247   return getLoopPassPreservedAnalyses();
248 }
249 
250 INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
251                       "Recognize loop idioms", false, false)
252 INITIALIZE_PASS_DEPENDENCY(LoopPass)
253 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
254 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
255 INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
256                     "Recognize loop idioms", false, false)
257 
258 Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
259 
260 static void deleteDeadInstruction(Instruction *I) {
261   I->replaceAllUsesWith(UndefValue::get(I->getType()));
262   I->eraseFromParent();
263 }
264 
265 //===----------------------------------------------------------------------===//
266 //
267 //          Implementation of LoopIdiomRecognize
268 //
269 //===----------------------------------------------------------------------===//
270 
271 bool LoopIdiomRecognize::runOnLoop(Loop *L) {
272   CurLoop = L;
273   // If the loop could not be converted to canonical form, it must have an
274   // indirectbr in it, just give up.
275   if (!L->getLoopPreheader())
276     return false;
277 
278   // Disable loop idiom recognition if the function's name is a common idiom.
279   StringRef Name = L->getHeader()->getParent()->getName();
280   if (Name == "memset" || Name == "memcpy")
281     return false;
282 
283   // Determine if code size heuristics need to be applied.
284   ApplyCodeSizeHeuristics =
285       L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
286 
287   HasMemset = TLI->has(LibFunc_memset);
288   HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
289   HasMemcpy = TLI->has(LibFunc_memcpy);
290 
291   if (HasMemset || HasMemsetPattern || HasMemcpy)
292     if (SE->hasLoopInvariantBackedgeTakenCount(L))
293       return runOnCountableLoop();
294 
295   return runOnNoncountableLoop();
296 }
297 
298 bool LoopIdiomRecognize::runOnCountableLoop() {
299   const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
300   assert(!isa<SCEVCouldNotCompute>(BECount) &&
301          "runOnCountableLoop() called on a loop without a predictable"
302          "backedge-taken count");
303 
304   // If this loop executes exactly one time, then it should be peeled, not
305   // optimized by this pass.
306   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
307     if (BECst->getAPInt() == 0)
308       return false;
309 
310   SmallVector<BasicBlock *, 8> ExitBlocks;
311   CurLoop->getUniqueExitBlocks(ExitBlocks);
312 
313   DEBUG(dbgs() << "loop-idiom Scanning: F["
314                << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
315                << CurLoop->getHeader()->getName() << "\n");
316 
317   bool MadeChange = false;
318 
319   // The following transforms hoist stores/memsets into the loop pre-header.
320   // Give up if the loop has instructions may throw.
321   LoopSafetyInfo SafetyInfo;
322   computeLoopSafetyInfo(&SafetyInfo, CurLoop);
323   if (SafetyInfo.MayThrow)
324     return MadeChange;
325 
326   // Scan all the blocks in the loop that are not in subloops.
327   for (auto *BB : CurLoop->getBlocks()) {
328     // Ignore blocks in subloops.
329     if (LI->getLoopFor(BB) != CurLoop)
330       continue;
331 
332     MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
333   }
334   return MadeChange;
335 }
336 
337 static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
338   const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
339   return ConstStride->getAPInt();
340 }
341 
342 /// getMemSetPatternValue - If a strided store of the specified value is safe to
343 /// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
344 /// be passed in.  Otherwise, return null.
345 ///
346 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these
347 /// just replicate their input array and then pass on to memset_pattern16.
348 static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
349   // If the value isn't a constant, we can't promote it to being in a constant
350   // array.  We could theoretically do a store to an alloca or something, but
351   // that doesn't seem worthwhile.
352   Constant *C = dyn_cast<Constant>(V);
353   if (!C)
354     return nullptr;
355 
356   // Only handle simple values that are a power of two bytes in size.
357   uint64_t Size = DL->getTypeSizeInBits(V->getType());
358   if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
359     return nullptr;
360 
361   // Don't care enough about darwin/ppc to implement this.
362   if (DL->isBigEndian())
363     return nullptr;
364 
365   // Convert to size in bytes.
366   Size /= 8;
367 
368   // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
369   // if the top and bottom are the same (e.g. for vectors and large integers).
370   if (Size > 16)
371     return nullptr;
372 
373   // If the constant is exactly 16 bytes, just use it.
374   if (Size == 16)
375     return C;
376 
377   // Otherwise, we'll use an array of the constants.
378   unsigned ArraySize = 16 / Size;
379   ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
380   return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
381 }
382 
383 LoopIdiomRecognize::LegalStoreKind
384 LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
385   // Don't touch volatile stores.
386   if (SI->isVolatile())
387     return LegalStoreKind::None;
388   // We only want simple or unordered-atomic stores.
389   if (!SI->isUnordered())
390     return LegalStoreKind::None;
391 
392   // Don't convert stores of non-integral pointer types to memsets (which stores
393   // integers).
394   if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
395     return LegalStoreKind::None;
396 
397   // Avoid merging nontemporal stores.
398   if (SI->getMetadata(LLVMContext::MD_nontemporal))
399     return LegalStoreKind::None;
400 
401   Value *StoredVal = SI->getValueOperand();
402   Value *StorePtr = SI->getPointerOperand();
403 
404   // Reject stores that are so large that they overflow an unsigned.
405   uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
406   if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
407     return LegalStoreKind::None;
408 
409   // See if the pointer expression is an AddRec like {base,+,1} on the current
410   // loop, which indicates a strided store.  If we have something else, it's a
411   // random store we can't handle.
412   const SCEVAddRecExpr *StoreEv =
413       dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
414   if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
415     return LegalStoreKind::None;
416 
417   // Check to see if we have a constant stride.
418   if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
419     return LegalStoreKind::None;
420 
421   // See if the store can be turned into a memset.
422 
423   // If the stored value is a byte-wise value (like i32 -1), then it may be
424   // turned into a memset of i8 -1, assuming that all the consecutive bytes
425   // are stored.  A store of i32 0x01020304 can never be turned into a memset,
426   // but it can be turned into memset_pattern if the target supports it.
427   Value *SplatValue = isBytewiseValue(StoredVal);
428   Constant *PatternValue = nullptr;
429 
430   // Note: memset and memset_pattern on unordered-atomic is yet not supported
431   bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
432 
433   // If we're allowed to form a memset, and the stored value would be
434   // acceptable for memset, use it.
435   if (!UnorderedAtomic && HasMemset && SplatValue &&
436       // Verify that the stored value is loop invariant.  If not, we can't
437       // promote the memset.
438       CurLoop->isLoopInvariant(SplatValue)) {
439     // It looks like we can use SplatValue.
440     return LegalStoreKind::Memset;
441   } else if (!UnorderedAtomic && HasMemsetPattern &&
442              // Don't create memset_pattern16s with address spaces.
443              StorePtr->getType()->getPointerAddressSpace() == 0 &&
444              (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
445     // It looks like we can use PatternValue!
446     return LegalStoreKind::MemsetPattern;
447   }
448 
449   // Otherwise, see if the store can be turned into a memcpy.
450   if (HasMemcpy) {
451     // Check to see if the stride matches the size of the store.  If so, then we
452     // know that every byte is touched in the loop.
453     APInt Stride = getStoreStride(StoreEv);
454     unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
455     if (StoreSize != Stride && StoreSize != -Stride)
456       return LegalStoreKind::None;
457 
458     // The store must be feeding a non-volatile load.
459     LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
460 
461     // Only allow non-volatile loads
462     if (!LI || LI->isVolatile())
463       return LegalStoreKind::None;
464     // Only allow simple or unordered-atomic loads
465     if (!LI->isUnordered())
466       return LegalStoreKind::None;
467 
468     // See if the pointer expression is an AddRec like {base,+,1} on the current
469     // loop, which indicates a strided load.  If we have something else, it's a
470     // random load we can't handle.
471     const SCEVAddRecExpr *LoadEv =
472         dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
473     if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
474       return LegalStoreKind::None;
475 
476     // The store and load must share the same stride.
477     if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
478       return LegalStoreKind::None;
479 
480     // Success.  This store can be converted into a memcpy.
481     UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
482     return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
483                            : LegalStoreKind::Memcpy;
484   }
485   // This store can't be transformed into a memset/memcpy.
486   return LegalStoreKind::None;
487 }
488 
489 void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
490   StoreRefsForMemset.clear();
491   StoreRefsForMemsetPattern.clear();
492   StoreRefsForMemcpy.clear();
493   for (Instruction &I : *BB) {
494     StoreInst *SI = dyn_cast<StoreInst>(&I);
495     if (!SI)
496       continue;
497 
498     // Make sure this is a strided store with a constant stride.
499     switch (isLegalStore(SI)) {
500     case LegalStoreKind::None:
501       // Nothing to do
502       break;
503     case LegalStoreKind::Memset: {
504       // Find the base pointer.
505       Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
506       StoreRefsForMemset[Ptr].push_back(SI);
507     } break;
508     case LegalStoreKind::MemsetPattern: {
509       // Find the base pointer.
510       Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
511       StoreRefsForMemsetPattern[Ptr].push_back(SI);
512     } break;
513     case LegalStoreKind::Memcpy:
514     case LegalStoreKind::UnorderedAtomicMemcpy:
515       StoreRefsForMemcpy.push_back(SI);
516       break;
517     default:
518       assert(false && "unhandled return value");
519       break;
520     }
521   }
522 }
523 
524 /// runOnLoopBlock - Process the specified block, which lives in a counted loop
525 /// with the specified backedge count.  This block is known to be in the current
526 /// loop and not in any subloops.
527 bool LoopIdiomRecognize::runOnLoopBlock(
528     BasicBlock *BB, const SCEV *BECount,
529     SmallVectorImpl<BasicBlock *> &ExitBlocks) {
530   // We can only promote stores in this block if they are unconditionally
531   // executed in the loop.  For a block to be unconditionally executed, it has
532   // to dominate all the exit blocks of the loop.  Verify this now.
533   for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
534     if (!DT->dominates(BB, ExitBlocks[i]))
535       return false;
536 
537   bool MadeChange = false;
538   // Look for store instructions, which may be optimized to memset/memcpy.
539   collectStores(BB);
540 
541   // Look for a single store or sets of stores with a common base, which can be
542   // optimized into a memset (memset_pattern).  The latter most commonly happens
543   // with structs and handunrolled loops.
544   for (auto &SL : StoreRefsForMemset)
545     MadeChange |= processLoopStores(SL.second, BECount, true);
546 
547   for (auto &SL : StoreRefsForMemsetPattern)
548     MadeChange |= processLoopStores(SL.second, BECount, false);
549 
550   // Optimize the store into a memcpy, if it feeds an similarly strided load.
551   for (auto &SI : StoreRefsForMemcpy)
552     MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
553 
554   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
555     Instruction *Inst = &*I++;
556     // Look for memset instructions, which may be optimized to a larger memset.
557     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
558       WeakTrackingVH InstPtr(&*I);
559       if (!processLoopMemSet(MSI, BECount))
560         continue;
561       MadeChange = true;
562 
563       // If processing the memset invalidated our iterator, start over from the
564       // top of the block.
565       if (!InstPtr)
566         I = BB->begin();
567       continue;
568     }
569   }
570 
571   return MadeChange;
572 }
573 
574 /// processLoopStores - See if this store(s) can be promoted to a memset.
575 bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
576                                            const SCEV *BECount,
577                                            bool ForMemset) {
578   // Try to find consecutive stores that can be transformed into memsets.
579   SetVector<StoreInst *> Heads, Tails;
580   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
581 
582   // Do a quadratic search on all of the given stores and find
583   // all of the pairs of stores that follow each other.
584   SmallVector<unsigned, 16> IndexQueue;
585   for (unsigned i = 0, e = SL.size(); i < e; ++i) {
586     assert(SL[i]->isSimple() && "Expected only non-volatile stores.");
587 
588     Value *FirstStoredVal = SL[i]->getValueOperand();
589     Value *FirstStorePtr = SL[i]->getPointerOperand();
590     const SCEVAddRecExpr *FirstStoreEv =
591         cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
592     APInt FirstStride = getStoreStride(FirstStoreEv);
593     unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());
594 
595     // See if we can optimize just this store in isolation.
596     if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
597       Heads.insert(SL[i]);
598       continue;
599     }
600 
601     Value *FirstSplatValue = nullptr;
602     Constant *FirstPatternValue = nullptr;
603 
604     if (ForMemset)
605       FirstSplatValue = isBytewiseValue(FirstStoredVal);
606     else
607       FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
608 
609     assert((FirstSplatValue || FirstPatternValue) &&
610            "Expected either splat value or pattern value.");
611 
612     IndexQueue.clear();
613     // If a store has multiple consecutive store candidates, search Stores
614     // array according to the sequence: from i+1 to e, then from i-1 to 0.
615     // This is because usually pairing with immediate succeeding or preceding
616     // candidate create the best chance to find memset opportunity.
617     unsigned j = 0;
618     for (j = i + 1; j < e; ++j)
619       IndexQueue.push_back(j);
620     for (j = i; j > 0; --j)
621       IndexQueue.push_back(j - 1);
622 
623     for (auto &k : IndexQueue) {
624       assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
625       Value *SecondStorePtr = SL[k]->getPointerOperand();
626       const SCEVAddRecExpr *SecondStoreEv =
627           cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
628       APInt SecondStride = getStoreStride(SecondStoreEv);
629 
630       if (FirstStride != SecondStride)
631         continue;
632 
633       Value *SecondStoredVal = SL[k]->getValueOperand();
634       Value *SecondSplatValue = nullptr;
635       Constant *SecondPatternValue = nullptr;
636 
637       if (ForMemset)
638         SecondSplatValue = isBytewiseValue(SecondStoredVal);
639       else
640         SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
641 
642       assert((SecondSplatValue || SecondPatternValue) &&
643              "Expected either splat value or pattern value.");
644 
645       if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
646         if (ForMemset) {
647           if (FirstSplatValue != SecondSplatValue)
648             continue;
649         } else {
650           if (FirstPatternValue != SecondPatternValue)
651             continue;
652         }
653         Tails.insert(SL[k]);
654         Heads.insert(SL[i]);
655         ConsecutiveChain[SL[i]] = SL[k];
656         break;
657       }
658     }
659   }
660 
661   // We may run into multiple chains that merge into a single chain. We mark the
662   // stores that we transformed so that we don't visit the same store twice.
663   SmallPtrSet<Value *, 16> TransformedStores;
664   bool Changed = false;
665 
666   // For stores that start but don't end a link in the chain:
667   for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
668        it != e; ++it) {
669     if (Tails.count(*it))
670       continue;
671 
672     // We found a store instr that starts a chain. Now follow the chain and try
673     // to transform it.
674     SmallPtrSet<Instruction *, 8> AdjacentStores;
675     StoreInst *I = *it;
676 
677     StoreInst *HeadStore = I;
678     unsigned StoreSize = 0;
679 
680     // Collect the chain into a list.
681     while (Tails.count(I) || Heads.count(I)) {
682       if (TransformedStores.count(I))
683         break;
684       AdjacentStores.insert(I);
685 
686       StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
687       // Move to the next value in the chain.
688       I = ConsecutiveChain[I];
689     }
690 
691     Value *StoredVal = HeadStore->getValueOperand();
692     Value *StorePtr = HeadStore->getPointerOperand();
693     const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
694     APInt Stride = getStoreStride(StoreEv);
695 
696     // Check to see if the stride matches the size of the stores.  If so, then
697     // we know that every byte is touched in the loop.
698     if (StoreSize != Stride && StoreSize != -Stride)
699       continue;
700 
701     bool NegStride = StoreSize == -Stride;
702 
703     if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
704                                 StoredVal, HeadStore, AdjacentStores, StoreEv,
705                                 BECount, NegStride)) {
706       TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
707       Changed = true;
708     }
709   }
710 
711   return Changed;
712 }
713 
714 /// processLoopMemSet - See if this memset can be promoted to a large memset.
715 bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
716                                            const SCEV *BECount) {
717   // We can only handle non-volatile memsets with a constant size.
718   if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
719     return false;
720 
721   // If we're not allowed to hack on memset, we fail.
722   if (!HasMemset)
723     return false;
724 
725   Value *Pointer = MSI->getDest();
726 
727   // See if the pointer expression is an AddRec like {base,+,1} on the current
728   // loop, which indicates a strided store.  If we have something else, it's a
729   // random store we can't handle.
730   const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
731   if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
732     return false;
733 
734   // Reject memsets that are so large that they overflow an unsigned.
735   uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
736   if ((SizeInBytes >> 32) != 0)
737     return false;
738 
739   // Check to see if the stride matches the size of the memset.  If so, then we
740   // know that every byte is touched in the loop.
741   const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
742   if (!ConstStride)
743     return false;
744 
745   APInt Stride = ConstStride->getAPInt();
746   if (SizeInBytes != Stride && SizeInBytes != -Stride)
747     return false;
748 
749   // Verify that the memset value is loop invariant.  If not, we can't promote
750   // the memset.
751   Value *SplatValue = MSI->getValue();
752   if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
753     return false;
754 
755   SmallPtrSet<Instruction *, 1> MSIs;
756   MSIs.insert(MSI);
757   bool NegStride = SizeInBytes == -Stride;
758   return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
759                                  MSI->getDestAlignment(), SplatValue, MSI, MSIs,
760                                  Ev, BECount, NegStride, /*IsLoopMemset=*/true);
761 }
762 
763 /// mayLoopAccessLocation - Return true if the specified loop might access the
764 /// specified pointer location, which is a loop-strided access.  The 'Access'
765 /// argument specifies what the verboten forms of access are (read or write).
766 static bool
767 mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
768                       const SCEV *BECount, unsigned StoreSize,
769                       AliasAnalysis &AA,
770                       SmallPtrSetImpl<Instruction *> &IgnoredStores) {
771   // Get the location that may be stored across the loop.  Since the access is
772   // strided positively through memory, we say that the modified location starts
773   // at the pointer and has infinite size.
774   uint64_t AccessSize = MemoryLocation::UnknownSize;
775 
776   // If the loop iterates a fixed number of times, we can refine the access size
777   // to be exactly the size of the memset, which is (BECount+1)*StoreSize
778   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
779     AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
780 
781   // TODO: For this to be really effective, we have to dive into the pointer
782   // operand in the store.  Store to &A[i] of 100 will always return may alias
783   // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
784   // which will then no-alias a store to &A[100].
785   MemoryLocation StoreLoc(Ptr, AccessSize);
786 
787   for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
788        ++BI)
789     for (Instruction &I : **BI)
790       if (IgnoredStores.count(&I) == 0 &&
791           isModOrRefSet(
792               intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
793         return true;
794 
795   return false;
796 }
797 
798 // If we have a negative stride, Start refers to the end of the memory location
799 // we're trying to memset.  Therefore, we need to recompute the base pointer,
800 // which is just Start - BECount*Size.
801 static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
802                                         Type *IntPtr, unsigned StoreSize,
803                                         ScalarEvolution *SE) {
804   const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
805   if (StoreSize != 1)
806     Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
807                            SCEV::FlagNUW);
808   return SE->getMinusSCEV(Start, Index);
809 }
810 
811 /// Compute the number of bytes as a SCEV from the backedge taken count.
812 ///
813 /// This also maps the SCEV into the provided type and tries to handle the
814 /// computation in a way that will fold cleanly.
815 static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
816                                unsigned StoreSize, Loop *CurLoop,
817                                const DataLayout *DL, ScalarEvolution *SE) {
818   const SCEV *NumBytesS;
819   // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
820   // pointer size if it isn't already.
821   //
822   // If we're going to need to zero extend the BE count, check if we can add
823   // one to it prior to zero extending without overflow. Provided this is safe,
824   // it allows better simplification of the +1.
825   if (DL->getTypeSizeInBits(BECount->getType()) <
826           DL->getTypeSizeInBits(IntPtr) &&
827       SE->isLoopEntryGuardedByCond(
828           CurLoop, ICmpInst::ICMP_NE, BECount,
829           SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
830     NumBytesS = SE->getZeroExtendExpr(
831         SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
832         IntPtr);
833   } else {
834     NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
835                                SE->getOne(IntPtr), SCEV::FlagNUW);
836   }
837 
838   // And scale it based on the store size.
839   if (StoreSize != 1) {
840     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
841                                SCEV::FlagNUW);
842   }
843   return NumBytesS;
844 }
845 
846 /// processLoopStridedStore - We see a strided store of some value.  If we can
847 /// transform this into a memset or memset_pattern in the loop preheader, do so.
848 bool LoopIdiomRecognize::processLoopStridedStore(
849     Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
850     Value *StoredVal, Instruction *TheStore,
851     SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
852     const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
853   Value *SplatValue = isBytewiseValue(StoredVal);
854   Constant *PatternValue = nullptr;
855 
856   if (!SplatValue)
857     PatternValue = getMemSetPatternValue(StoredVal, DL);
858 
859   assert((SplatValue || PatternValue) &&
860          "Expected either splat value or pattern value.");
861 
862   // The trip count of the loop and the base pointer of the addrec SCEV is
863   // guaranteed to be loop invariant, which means that it should dominate the
864   // header.  This allows us to insert code for it in the preheader.
865   unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
866   BasicBlock *Preheader = CurLoop->getLoopPreheader();
867   IRBuilder<> Builder(Preheader->getTerminator());
868   SCEVExpander Expander(*SE, *DL, "loop-idiom");
869 
870   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
871   Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
872 
873   const SCEV *Start = Ev->getStart();
874   // Handle negative strided loops.
875   if (NegStride)
876     Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
877 
878   // TODO: ideally we should still be able to generate memset if SCEV expander
879   // is taught to generate the dependencies at the latest point.
880   if (!isSafeToExpand(Start, *SE))
881     return false;
882 
883   // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
884   // this into a memset in the loop preheader now if we want.  However, this
885   // would be unsafe to do if there is anything else in the loop that may read
886   // or write to the aliased location.  Check for any overlap by generating the
887   // base pointer and checking the region.
888   Value *BasePtr =
889       Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
890   if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
891                             StoreSize, *AA, Stores)) {
892     Expander.clear();
893     // If we generated new code for the base pointer, clean up.
894     RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
895     return false;
896   }
897 
898   if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
899     return false;
900 
901   // Okay, everything looks good, insert the memset.
902 
903   const SCEV *NumBytesS =
904       getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
905 
906   // TODO: ideally we should still be able to generate memset if SCEV expander
907   // is taught to generate the dependencies at the latest point.
908   if (!isSafeToExpand(NumBytesS, *SE))
909     return false;
910 
911   Value *NumBytes =
912       Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
913 
914   CallInst *NewCall;
915   if (SplatValue) {
916     NewCall =
917         Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
918   } else {
919     // Everything is emitted in default address space
920     Type *Int8PtrTy = DestInt8PtrTy;
921 
922     Module *M = TheStore->getModule();
923     Value *MSP =
924         M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
925                                Int8PtrTy, Int8PtrTy, IntPtr);
926     inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
927 
928     // Otherwise we should form a memset_pattern16.  PatternValue is known to be
929     // an constant array of 16-bytes.  Plop the value into a mergable global.
930     GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
931                                             GlobalValue::PrivateLinkage,
932                                             PatternValue, ".memset_pattern");
933     GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
934     GV->setAlignment(16);
935     Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
936     NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
937   }
938 
939   DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
940                << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
941   NewCall->setDebugLoc(TheStore->getDebugLoc());
942 
943   // Okay, the memset has been formed.  Zap the original store and anything that
944   // feeds into it.
945   for (auto *I : Stores)
946     deleteDeadInstruction(I);
947   ++NumMemSet;
948   return true;
949 }
950 
951 /// If the stored value is a strided load in the same loop with the same stride
952 /// this may be transformable into a memcpy.  This kicks in for stuff like
953 /// for (i) A[i] = B[i];
954 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
955                                                     const SCEV *BECount) {
956   assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
957 
958   Value *StorePtr = SI->getPointerOperand();
959   const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
960   APInt Stride = getStoreStride(StoreEv);
961   unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
962   bool NegStride = StoreSize == -Stride;
963 
964   // The store must be feeding a non-volatile load.
965   LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
966   assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
967 
968   // See if the pointer expression is an AddRec like {base,+,1} on the current
969   // loop, which indicates a strided load.  If we have something else, it's a
970   // random load we can't handle.
971   const SCEVAddRecExpr *LoadEv =
972       cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
973 
974   // The trip count of the loop and the base pointer of the addrec SCEV is
975   // guaranteed to be loop invariant, which means that it should dominate the
976   // header.  This allows us to insert code for it in the preheader.
977   BasicBlock *Preheader = CurLoop->getLoopPreheader();
978   IRBuilder<> Builder(Preheader->getTerminator());
979   SCEVExpander Expander(*SE, *DL, "loop-idiom");
980 
981   const SCEV *StrStart = StoreEv->getStart();
982   unsigned StrAS = SI->getPointerAddressSpace();
983   Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
984 
985   // Handle negative strided loops.
986   if (NegStride)
987     StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
988 
989   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
990   // this into a memcpy in the loop preheader now if we want.  However, this
991   // would be unsafe to do if there is anything else in the loop that may read
992   // or write the memory region we're storing to.  This includes the load that
993   // feeds the stores.  Check for an alias by generating the base address and
994   // checking everything.
995   Value *StoreBasePtr = Expander.expandCodeFor(
996       StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
997 
998   SmallPtrSet<Instruction *, 1> Stores;
999   Stores.insert(SI);
1000   if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
1001                             StoreSize, *AA, Stores)) {
1002     Expander.clear();
1003     // If we generated new code for the base pointer, clean up.
1004     RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1005     return false;
1006   }
1007 
1008   const SCEV *LdStart = LoadEv->getStart();
1009   unsigned LdAS = LI->getPointerAddressSpace();
1010 
1011   // Handle negative strided loops.
1012   if (NegStride)
1013     LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
1014 
1015   // For a memcpy, we have to make sure that the input array is not being
1016   // mutated by the loop.
1017   Value *LoadBasePtr = Expander.expandCodeFor(
1018       LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
1019 
1020   if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
1021                             StoreSize, *AA, Stores)) {
1022     Expander.clear();
1023     // If we generated new code for the base pointer, clean up.
1024     RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1025     RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1026     return false;
1027   }
1028 
1029   if (avoidLIRForMultiBlockLoop())
1030     return false;
1031 
1032   // Okay, everything is safe, we can transform this!
1033 
1034   const SCEV *NumBytesS =
1035       getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
1036 
1037   Value *NumBytes =
1038       Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
1039 
1040   CallInst *NewCall = nullptr;
1041   // Check whether to generate an unordered atomic memcpy:
1042   //  If the load or store are atomic, then they must neccessarily be unordered
1043   //  by previous checks.
1044   if (!SI->isAtomic() && !LI->isAtomic())
1045     NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
1046                                    LoadBasePtr, LI->getAlignment(), NumBytes);
1047   else {
1048     // We cannot allow unaligned ops for unordered load/store, so reject
1049     // anything where the alignment isn't at least the element size.
1050     unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
1051     if (Align < StoreSize)
1052       return false;
1053 
1054     // If the element.atomic memcpy is not lowered into explicit
1055     // loads/stores later, then it will be lowered into an element-size
1056     // specific lib call. If the lib call doesn't exist for our store size, then
1057     // we shouldn't generate the memcpy.
1058     if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
1059       return false;
1060 
1061     // Create the call.
1062     // Note that unordered atomic loads/stores are *required* by the spec to
1063     // have an alignment but non-atomic loads/stores may not.
1064     NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1065         StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
1066         NumBytes, StoreSize);
1067   }
1068   NewCall->setDebugLoc(SI->getDebugLoc());
1069 
1070   DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
1071                << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
1072                << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");
1073 
1074   // Okay, the memcpy has been formed.  Zap the original store and anything that
1075   // feeds into it.
1076   deleteDeadInstruction(SI);
1077   ++NumMemCpy;
1078   return true;
1079 }
1080 
1081 // When compiling for codesize we avoid idiom recognition for a multi-block loop
1082 // unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
1083 //
1084 bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1085                                                    bool IsLoopMemset) {
1086   if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1087     if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
1088       DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
1089                    << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
1090                    << " avoided: multi-block top-level loop\n");
1091       return true;
1092     }
1093   }
1094 
1095   return false;
1096 }
1097 
1098 bool LoopIdiomRecognize::runOnNoncountableLoop() {
1099   return recognizePopcount() || recognizeAndInsertCTLZ();
1100 }
1101 
1102 /// Check if the given conditional branch is based on the comparison between
1103 /// a variable and zero, and if the variable is non-zero, the control yields to
1104 /// the loop entry. If the branch matches the behavior, the variable involved
1105 /// in the comparison is returned. This function will be called to see if the
1106 /// precondition and postcondition of the loop are in desirable form.
1107 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
1108   if (!BI || !BI->isConditional())
1109     return nullptr;
1110 
1111   ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1112   if (!Cond)
1113     return nullptr;
1114 
1115   ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1116   if (!CmpZero || !CmpZero->isZero())
1117     return nullptr;
1118 
1119   ICmpInst::Predicate Pred = Cond->getPredicate();
1120   if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
1121       (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
1122     return Cond->getOperand(0);
1123 
1124   return nullptr;
1125 }
1126 
1127 // Check if the recurrence variable `VarX` is in the right form to create
1128 // the idiom. Returns the value coerced to a PHINode if so.
1129 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
1130                                  BasicBlock *LoopEntry) {
1131   auto *PhiX = dyn_cast<PHINode>(VarX);
1132   if (PhiX && PhiX->getParent() == LoopEntry &&
1133       (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
1134     return PhiX;
1135   return nullptr;
1136 }
1137 
1138 /// Return true iff the idiom is detected in the loop.
1139 ///
1140 /// Additionally:
1141 /// 1) \p CntInst is set to the instruction counting the population bit.
1142 /// 2) \p CntPhi is set to the corresponding phi node.
1143 /// 3) \p Var is set to the value whose population bits are being counted.
1144 ///
1145 /// The core idiom we are trying to detect is:
1146 /// \code
1147 ///    if (x0 != 0)
1148 ///      goto loop-exit // the precondition of the loop
1149 ///    cnt0 = init-val;
1150 ///    do {
1151 ///       x1 = phi (x0, x2);
1152 ///       cnt1 = phi(cnt0, cnt2);
1153 ///
1154 ///       cnt2 = cnt1 + 1;
1155 ///        ...
1156 ///       x2 = x1 & (x1 - 1);
1157 ///        ...
1158 ///    } while(x != 0);
1159 ///
1160 /// loop-exit:
1161 /// \endcode
1162 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1163                                 Instruction *&CntInst, PHINode *&CntPhi,
1164                                 Value *&Var) {
1165   // step 1: Check to see if the look-back branch match this pattern:
1166   //    "if (a!=0) goto loop-entry".
1167   BasicBlock *LoopEntry;
1168   Instruction *DefX2, *CountInst;
1169   Value *VarX1, *VarX0;
1170   PHINode *PhiX, *CountPhi;
1171 
1172   DefX2 = CountInst = nullptr;
1173   VarX1 = VarX0 = nullptr;
1174   PhiX = CountPhi = nullptr;
1175   LoopEntry = *(CurLoop->block_begin());
1176 
1177   // step 1: Check if the loop-back branch is in desirable form.
1178   {
1179     if (Value *T = matchCondition(
1180             dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1181       DefX2 = dyn_cast<Instruction>(T);
1182     else
1183       return false;
1184   }
1185 
1186   // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1187   {
1188     if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1189       return false;
1190 
1191     BinaryOperator *SubOneOp;
1192 
1193     if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1194       VarX1 = DefX2->getOperand(1);
1195     else {
1196       VarX1 = DefX2->getOperand(0);
1197       SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1198     }
1199     if (!SubOneOp)
1200       return false;
1201 
1202     Instruction *SubInst = cast<Instruction>(SubOneOp);
1203     ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
1204     if (!Dec ||
1205         !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1206           (SubInst->getOpcode() == Instruction::Add &&
1207            Dec->isMinusOne()))) {
1208       return false;
1209     }
1210   }
1211 
1212   // step 3: Check the recurrence of variable X
1213   PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
1214   if (!PhiX)
1215     return false;
1216 
1217   // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
1218   {
1219     CountInst = nullptr;
1220     for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1221                               IterE = LoopEntry->end();
1222          Iter != IterE; Iter++) {
1223       Instruction *Inst = &*Iter;
1224       if (Inst->getOpcode() != Instruction::Add)
1225         continue;
1226 
1227       ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1228       if (!Inc || !Inc->isOne())
1229         continue;
1230 
1231       PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1232       if (!Phi)
1233         continue;
1234 
1235       // Check if the result of the instruction is live of the loop.
1236       bool LiveOutLoop = false;
1237       for (User *U : Inst->users()) {
1238         if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1239           LiveOutLoop = true;
1240           break;
1241         }
1242       }
1243 
1244       if (LiveOutLoop) {
1245         CountInst = Inst;
1246         CountPhi = Phi;
1247         break;
1248       }
1249     }
1250 
1251     if (!CountInst)
1252       return false;
1253   }
1254 
1255   // step 5: check if the precondition is in this form:
1256   //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1257   {
1258     auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1259     Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
1260     if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
1261       return false;
1262 
1263     CntInst = CountInst;
1264     CntPhi = CountPhi;
1265     Var = T;
1266   }
1267 
1268   return true;
1269 }
1270 
1271 /// Return true if the idiom is detected in the loop.
1272 ///
1273 /// Additionally:
1274 /// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
1275 ///       or nullptr if there is no such.
1276 /// 2) \p CntPhi is set to the corresponding phi node
1277 ///       or nullptr if there is no such.
1278 /// 3) \p Var is set to the value whose CTLZ could be used.
1279 /// 4) \p DefX is set to the instruction calculating Loop exit condition.
1280 ///
1281 /// The core idiom we are trying to detect is:
1282 /// \code
1283 ///    if (x0 == 0)
1284 ///      goto loop-exit // the precondition of the loop
1285 ///    cnt0 = init-val;
1286 ///    do {
1287 ///       x = phi (x0, x.next);   //PhiX
1288 ///       cnt = phi(cnt0, cnt.next);
1289 ///
1290 ///       cnt.next = cnt + 1;
1291 ///        ...
1292 ///       x.next = x >> 1;   // DefX
1293 ///        ...
1294 ///    } while(x.next != 0);
1295 ///
1296 /// loop-exit:
1297 /// \endcode
1298 static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
1299                             Instruction *&CntInst, PHINode *&CntPhi,
1300                             Instruction *&DefX) {
1301   BasicBlock *LoopEntry;
1302   Value *VarX = nullptr;
1303 
1304   DefX = nullptr;
1305   PhiX = nullptr;
1306   CntInst = nullptr;
1307   CntPhi = nullptr;
1308   LoopEntry = *(CurLoop->block_begin());
1309 
1310   // step 1: Check if the loop-back branch is in desirable form.
1311   if (Value *T = matchCondition(
1312           dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1313     DefX = dyn_cast<Instruction>(T);
1314   else
1315     return false;
1316 
1317   // step 2: detect instructions corresponding to "x.next = x >> 1"
1318   if (!DefX || DefX->getOpcode() != Instruction::AShr)
1319     return false;
1320   ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1321   if (!Shft || !Shft->isOne())
1322     return false;
1323   VarX = DefX->getOperand(0);
1324 
1325   // step 3: Check the recurrence of variable X
1326   PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1327   if (!PhiX)
1328     return false;
1329 
1330   // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
1331   // TODO: We can skip the step. If loop trip count is known (CTLZ),
1332   //       then all uses of "cnt.next" could be optimized to the trip count
1333   //       plus "cnt0". Currently it is not optimized.
1334   //       This step could be used to detect POPCNT instruction:
1335   //       cnt.next = cnt + (x.next & 1)
1336   for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1337                             IterE = LoopEntry->end();
1338        Iter != IterE; Iter++) {
1339     Instruction *Inst = &*Iter;
1340     if (Inst->getOpcode() != Instruction::Add)
1341       continue;
1342 
1343     ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1344     if (!Inc || !Inc->isOne())
1345       continue;
1346 
1347     PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1348     if (!Phi)
1349       continue;
1350 
1351     CntInst = Inst;
1352     CntPhi = Phi;
1353     break;
1354   }
1355   if (!CntInst)
1356     return false;
1357 
1358   return true;
1359 }
1360 
1361 /// Recognize CTLZ idiom in a non-countable loop and convert the loop
1362 /// to countable (with CTLZ trip count).
1363 /// If CTLZ inserted as a new trip count returns true; otherwise, returns false.
1364 bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
1365   // Give up if the loop has multiple blocks or multiple backedges.
1366   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1367     return false;
1368 
1369   Instruction *CntInst, *DefX;
1370   PHINode *CntPhi, *PhiX;
1371   if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
1372     return false;
1373 
1374   bool IsCntPhiUsedOutsideLoop = false;
1375   for (User *U : CntPhi->users())
1376     if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1377       IsCntPhiUsedOutsideLoop = true;
1378       break;
1379     }
1380   bool IsCntInstUsedOutsideLoop = false;
1381   for (User *U : CntInst->users())
1382     if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1383       IsCntInstUsedOutsideLoop = true;
1384       break;
1385     }
1386   // If both CntInst and CntPhi are used outside the loop the profitability
1387   // is questionable.
1388   if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1389     return false;
1390 
1391   // For some CPUs result of CTLZ(X) intrinsic is undefined
1392   // when X is 0. If we can not guarantee X != 0, we need to check this
1393   // when expand.
1394   bool ZeroCheck = false;
1395   // It is safe to assume Preheader exist as it was checked in
1396   // parent function RunOnLoop.
1397   BasicBlock *PH = CurLoop->getLoopPreheader();
1398   Value *InitX = PhiX->getIncomingValueForBlock(PH);
1399   // If we check X != 0 before entering the loop we don't need a zero
1400   // check in CTLZ intrinsic, but only if Cnt Phi is not used outside of the
1401   // loop (if it is used we count CTLZ(X >> 1)).
1402   if (!IsCntPhiUsedOutsideLoop)
1403     if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
1404       if (BranchInst *PreCondBr =
1405           dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
1406         if (matchCondition(PreCondBr, PH) == InitX)
1407           ZeroCheck = true;
1408       }
1409 
1410   // Check if CTLZ intrinsic is profitable. Assume it is always profitable
1411   // if we delete the loop (the loop has only 6 instructions):
1412   //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1413   //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1414   //  %shr = ashr %n.addr.0, 1
1415   //  %tobool = icmp eq %shr, 0
1416   //  %inc = add nsw %i.0, 1
1417   //  br i1 %tobool
1418 
1419   IRBuilder<> Builder(PH->getTerminator());
1420   SmallVector<const Value *, 2> Ops =
1421       {InitX, ZeroCheck ? Builder.getTrue() : Builder.getFalse()};
1422   ArrayRef<const Value *> Args(Ops);
1423   if (CurLoop->getHeader()->size() != 6 &&
1424       TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
1425           TargetTransformInfo::TCC_Basic)
1426     return false;
1427 
1428   const DebugLoc DL = DefX->getDebugLoc();
1429   transformLoopToCountable(PH, CntInst, CntPhi, InitX, DL, ZeroCheck,
1430                            IsCntPhiUsedOutsideLoop);
1431   return true;
1432 }
1433 
1434 /// Recognizes a population count idiom in a non-countable loop.
1435 ///
1436 /// If detected, transforms the relevant code to issue the popcount intrinsic
1437 /// function call, and returns true; otherwise, returns false.
1438 bool LoopIdiomRecognize::recognizePopcount() {
1439   if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
1440     return false;
1441 
1442   // Counting population are usually conducted by few arithmetic instructions.
1443   // Such instructions can be easily "absorbed" by vacant slots in a
1444   // non-compact loop. Therefore, recognizing popcount idiom only makes sense
1445   // in a compact loop.
1446 
1447   // Give up if the loop has multiple blocks or multiple backedges.
1448   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1449     return false;
1450 
1451   BasicBlock *LoopBody = *(CurLoop->block_begin());
1452   if (LoopBody->size() >= 20) {
1453     // The loop is too big, bail out.
1454     return false;
1455   }
1456 
1457   // It should have a preheader containing nothing but an unconditional branch.
1458   BasicBlock *PH = CurLoop->getLoopPreheader();
1459   if (!PH || &PH->front() != PH->getTerminator())
1460     return false;
1461   auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1462   if (!EntryBI || EntryBI->isConditional())
1463     return false;
1464 
1465   // It should have a precondition block where the generated popcount instrinsic
1466   // function can be inserted.
1467   auto *PreCondBB = PH->getSinglePredecessor();
1468   if (!PreCondBB)
1469     return false;
1470   auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1471   if (!PreCondBI || PreCondBI->isUnconditional())
1472     return false;
1473 
1474   Instruction *CntInst;
1475   PHINode *CntPhi;
1476   Value *Val;
1477   if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
1478     return false;
1479 
1480   transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1481   return true;
1482 }
1483 
1484 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1485                                        const DebugLoc &DL) {
1486   Value *Ops[] = {Val};
1487   Type *Tys[] = {Val->getType()};
1488 
1489   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1490   Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
1491   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1492   CI->setDebugLoc(DL);
1493 
1494   return CI;
1495 }
1496 
1497 static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1498                                      const DebugLoc &DL, bool ZeroCheck) {
1499   Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
1500   Type *Tys[] = {Val->getType()};
1501 
1502   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1503   Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
1504   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1505   CI->setDebugLoc(DL);
1506 
1507   return CI;
1508 }
1509 
1510 /// Transform the following loop:
1511 /// loop:
1512 ///   CntPhi = PHI [Cnt0, CntInst]
1513 ///   PhiX = PHI [InitX, DefX]
1514 ///   CntInst = CntPhi + 1
1515 ///   DefX = PhiX >> 1
1516 ///   LOOP_BODY
1517 ///   Br: loop if (DefX != 0)
1518 /// Use(CntPhi) or Use(CntInst)
1519 ///
1520 /// Into:
1521 /// If CntPhi used outside the loop:
1522 ///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
1523 ///   Count = CountPrev + 1
1524 /// else
1525 ///   Count = BitWidth(InitX) - CTLZ(InitX)
1526 /// loop:
1527 ///   CntPhi = PHI [Cnt0, CntInst]
1528 ///   PhiX = PHI [InitX, DefX]
1529 ///   PhiCount = PHI [Count, Dec]
1530 ///   CntInst = CntPhi + 1
1531 ///   DefX = PhiX >> 1
1532 ///   Dec = PhiCount - 1
1533 ///   LOOP_BODY
1534 ///   Br: loop if (Dec != 0)
1535 /// Use(CountPrev + Cnt0) // Use(CntPhi)
1536 /// or
1537 /// Use(Count + Cnt0) // Use(CntInst)
1538 ///
1539 /// If LOOP_BODY is empty the loop will be deleted.
1540 /// If CntInst and DefX are not used in LOOP_BODY they will be removed.
1541 void LoopIdiomRecognize::transformLoopToCountable(
1542     BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
1543     const DebugLoc DL, bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
1544   BranchInst *PreheaderBr = dyn_cast<BranchInst>(Preheader->getTerminator());
1545 
1546   // Step 1: Insert the CTLZ instruction at the end of the preheader block
1547   //   Count = BitWidth - CTLZ(InitX);
1548   // If there are uses of CntPhi create:
1549   //   CountPrev = BitWidth - CTLZ(InitX >> 1);
1550   IRBuilder<> Builder(PreheaderBr);
1551   Builder.SetCurrentDebugLocation(DL);
1552   Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
1553 
1554   if (IsCntPhiUsedOutsideLoop)
1555     InitXNext = Builder.CreateAShr(InitX,
1556                                    ConstantInt::get(InitX->getType(), 1));
1557   else
1558     InitXNext = InitX;
1559   CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
1560   Count = Builder.CreateSub(
1561       ConstantInt::get(CTLZ->getType(),
1562                        CTLZ->getType()->getIntegerBitWidth()),
1563       CTLZ);
1564   if (IsCntPhiUsedOutsideLoop) {
1565     CountPrev = Count;
1566     Count = Builder.CreateAdd(
1567         CountPrev,
1568         ConstantInt::get(CountPrev->getType(), 1));
1569   }
1570   if (IsCntPhiUsedOutsideLoop)
1571     NewCount = Builder.CreateZExtOrTrunc(CountPrev,
1572         cast<IntegerType>(CntInst->getType()));
1573   else
1574     NewCount = Builder.CreateZExtOrTrunc(Count,
1575         cast<IntegerType>(CntInst->getType()));
1576 
1577   // If the CTLZ counter's initial value is not zero, insert Add Inst.
1578   Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
1579   ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1580   if (!InitConst || !InitConst->isZero())
1581     NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1582 
1583   // Step 2: Insert new IV and loop condition:
1584   // loop:
1585   //   ...
1586   //   PhiCount = PHI [Count, Dec]
1587   //   ...
1588   //   Dec = PhiCount - 1
1589   //   ...
1590   //   Br: loop if (Dec != 0)
1591   BasicBlock *Body = *(CurLoop->block_begin());
1592   auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1593   ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1594   Type *Ty = Count->getType();
1595 
1596   PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1597 
1598   Builder.SetInsertPoint(LbCond);
1599   Instruction *TcDec = cast<Instruction>(
1600       Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1601                         "tcdec", false, true));
1602 
1603   TcPhi->addIncoming(Count, Preheader);
1604   TcPhi->addIncoming(TcDec, Body);
1605 
1606   CmpInst::Predicate Pred =
1607       (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
1608   LbCond->setPredicate(Pred);
1609   LbCond->setOperand(0, TcDec);
1610   LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1611 
1612   // Step 3: All the references to the original counter outside
1613   //  the loop are replaced with the NewCount -- the value returned from
1614   //  __builtin_ctlz(x).
1615   if (IsCntPhiUsedOutsideLoop)
1616     CntPhi->replaceUsesOutsideBlock(NewCount, Body);
1617   else
1618     CntInst->replaceUsesOutsideBlock(NewCount, Body);
1619 
1620   // step 4: Forget the "non-computable" trip-count SCEV associated with the
1621   //   loop. The loop would otherwise not be deleted even if it becomes empty.
1622   SE->forgetLoop(CurLoop);
1623 }
1624 
1625 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
1626                                                  Instruction *CntInst,
1627                                                  PHINode *CntPhi, Value *Var) {
1628   BasicBlock *PreHead = CurLoop->getLoopPreheader();
1629   auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1630   const DebugLoc DL = CntInst->getDebugLoc();
1631 
1632   // Assuming before transformation, the loop is following:
1633   //  if (x) // the precondition
1634   //     do { cnt++; x &= x - 1; } while(x);
1635 
1636   // Step 1: Insert the ctpop instruction at the end of the precondition block
1637   IRBuilder<> Builder(PreCondBr);
1638   Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
1639   {
1640     PopCnt = createPopcntIntrinsic(Builder, Var, DL);
1641     NewCount = PopCntZext =
1642         Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
1643 
1644     if (NewCount != PopCnt)
1645       (cast<Instruction>(NewCount))->setDebugLoc(DL);
1646 
1647     // TripCnt is exactly the number of iterations the loop has
1648     TripCnt = NewCount;
1649 
1650     // If the population counter's initial value is not zero, insert Add Inst.
1651     Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
1652     ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1653     if (!InitConst || !InitConst->isZero()) {
1654       NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1655       (cast<Instruction>(NewCount))->setDebugLoc(DL);
1656     }
1657   }
1658 
1659   // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
1660   //   "if (NewCount == 0) loop-exit". Without this change, the intrinsic
1661   //   function would be partial dead code, and downstream passes will drag
1662   //   it back from the precondition block to the preheader.
1663   {
1664     ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
1665 
1666     Value *Opnd0 = PopCntZext;
1667     Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
1668     if (PreCond->getOperand(0) != Var)
1669       std::swap(Opnd0, Opnd1);
1670 
1671     ICmpInst *NewPreCond = cast<ICmpInst>(
1672         Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
1673     PreCondBr->setCondition(NewPreCond);
1674 
1675     RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
1676   }
1677 
1678   // Step 3: Note that the population count is exactly the trip count of the
1679   // loop in question, which enable us to convert the loop from noncountable
1680   // loop into a countable one. The benefit is twofold:
1681   //
1682   //  - If the loop only counts population, the entire loop becomes dead after
1683   //    the transformation. It is a lot easier to prove a countable loop dead
1684   //    than to prove a noncountable one. (In some C dialects, an infinite loop
1685   //    isn't dead even if it computes nothing useful. In general, DCE needs
1686   //    to prove a noncountable loop finite before safely delete it.)
1687   //
1688   //  - If the loop also performs something else, it remains alive.
1689   //    Since it is transformed to countable form, it can be aggressively
1690   //    optimized by some optimizations which are in general not applicable
1691   //    to a noncountable loop.
1692   //
1693   // After this step, this loop (conceptually) would look like following:
1694   //   newcnt = __builtin_ctpop(x);
1695   //   t = newcnt;
1696   //   if (x)
1697   //     do { cnt++; x &= x-1; t--) } while (t > 0);
1698   BasicBlock *Body = *(CurLoop->block_begin());
1699   {
1700     auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1701     ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1702     Type *Ty = TripCnt->getType();
1703 
1704     PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1705 
1706     Builder.SetInsertPoint(LbCond);
1707     Instruction *TcDec = cast<Instruction>(
1708         Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1709                           "tcdec", false, true));
1710 
1711     TcPhi->addIncoming(TripCnt, PreHead);
1712     TcPhi->addIncoming(TcDec, Body);
1713 
1714     CmpInst::Predicate Pred =
1715         (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
1716     LbCond->setPredicate(Pred);
1717     LbCond->setOperand(0, TcDec);
1718     LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1719   }
1720 
1721   // Step 4: All the references to the original population counter outside
1722   //  the loop are replaced with the NewCount -- the value returned from
1723   //  __builtin_ctpop().
1724   CntInst->replaceUsesOutsideBlock(NewCount, Body);
1725 
1726   // step 5: Forget the "non-computable" trip-count SCEV associated with the
1727   //   loop. The loop would otherwise not be deleted even if it becomes empty.
1728   SE->forgetLoop(CurLoop);
1729 }
1730