//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}
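
// An illustrative usage sketch (the remark name and message here are
// hypothetical): the returned remark is streamed to and then handed to an
// OptimizationRemarkEmitter, e.g.:
//
//   ORE->emit(createMissedAnalysis(LV_NAME, "CantComputeTripCount", TheLoop)
//             << "could not determine number of loop iterations");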

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
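
// A minimal illustration (the context and variable names are hypothetical):
//   Type *I32 = Type::getInt32Ty(Ctx);
//   ToVectorTy(I32, 4); // returns <4 x i32>
//   ToVectorTy(I32, 1); // returns the scalar i32 type unchanged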

/// A helper function that returns the GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store
/// instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {

  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
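
// A concrete illustration: i1 has a type size of 1 bit but an allocation size
// of 8 bits, so at VF = 1 an array of i1 requires padding between elements
// and the type is irregular. By contrast, on typical targets i32 at VF = 4 is
// regular, since 4 * 4 bytes equals the store size of <4 x i32>.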

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}
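
// For illustration (hypothetical types): with an i32 type,
// getSignedIntOrFpConstant(Ty, -1) yields the integer constant i32 -1 via
// ConstantInt::getSigned, whereas with a float type it yields the
// floating-point constant -1.0 via ConstantFP::get.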

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;
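
  // For example (illustrative): with UF = 2 and VF = 4, a vectorized value is
  // represented by 2 vector Values of type <4 x Ty>, while a scalarized value
  // is represented by 2 groups of 4 scalar Values, addressed as
  // ScalarParts[Part][Lane] with Part in [0, 2) and Lane in [0, 4).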

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// Insert the new loop into the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is a
  /// vector, we build the required vector on-demand with an insertelement
  /// sequence when visiting the use. Otherwise, if the use is scalar, we can
  /// use the existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  ///   <StoreInst, Predicate>
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds instructions from the original loop whose counterparts in the
  // vectorized loop would be trivially dead if generated. For example,
  // original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling())
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The index range (LargestKey - SmallestKey) must still fit within the
      // interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};
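
// An illustrative sketch of building a group (the instructions here are
// hypothetical): two loads with stride 2 form a factor-2 group, the second
// member sitting one element after the leader:
//
//   InterleaveGroup Group(LeaderLoad, /*Stride=*/2, /*Align=*/4);
//   if (Group.insertMember(SecondLoad, /*Index=*/1, /*NewAlign=*/4))
//     assert(Group.getMember(1) == SecondLoad && Group.getFactor() == 2);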
1053 
1054 /// \brief Drive the analysis of interleaved memory accesses in the loop.
1055 ///
1056 /// Use this class to analyze interleaved accesses only when we can vectorize
1057 /// a loop. Otherwise it's meaningless to do analysis as the vectorization
1058 /// on interleaved accesses is unsafe.
1059 ///
1060 /// The analysis collects interleave groups and records the relationships
1061 /// between the member and the group in a map.
1062 class InterleavedAccessInfo {
1063 public:
1064   InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
1065                         DominatorTree *DT, LoopInfo *LI)
1066       : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
1067         RequiresScalarEpilogue(false) {}
1068 
1069   ~InterleavedAccessInfo() {
1070     SmallSet<InterleaveGroup *, 4> DelSet;
1071     // Avoid releasing a pointer twice.
1072     for (auto &I : InterleaveGroupMap)
1073       DelSet.insert(I.second);
1074     for (auto *Ptr : DelSet)
1075       delete Ptr;
1076   }
1077 
1078   /// \brief Analyze the interleaved accesses and collect them in interleave
1079   /// groups. Substitute symbolic strides using \p Strides.
1080   void analyzeInterleaving(const ValueToValueMap &Strides);
1081 
1082   /// \brief Check if \p Instr belongs to any interleave group.
1083   bool isInterleaved(Instruction *Instr) const {
1084     return InterleaveGroupMap.count(Instr);
1085   }
1086 
1087   /// \brief Return the maximum interleave factor of all interleaved groups.
1088   unsigned getMaxInterleaveFactor() const {
1089     unsigned MaxFactor = 1;
1090     for (auto &Entry : InterleaveGroupMap)
1091       MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
1092     return MaxFactor;
1093   }
1094 
1095   /// \brief Get the interleave group that \p Instr belongs to.
1096   ///
1097   /// \returns nullptr if doesn't have such group.
1098   InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
1099     if (InterleaveGroupMap.count(Instr))
1100       return InterleaveGroupMap.find(Instr)->second;
1101     return nullptr;
1102   }
1103 
1104   /// \brief Returns true if an interleaved group that may access memory
1105   /// out-of-bounds requires a scalar epilogue iteration for correctness.
1106   bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
1107 
1108   /// \brief Initialize the LoopAccessInfo used for dependence checking.
1109   void setLAI(const LoopAccessInfo *Info) { LAI = Info; }
1110 
1111 private:
1112   /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
1113   /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
1114   /// The interleaved access analysis can also add new predicates (for example
1115   /// by versioning strides of pointers).
1116   PredicatedScalarEvolution &PSE;
1117   Loop *TheLoop;
1118   DominatorTree *DT;
1119   LoopInfo *LI;
1120   const LoopAccessInfo *LAI;
1121 
1122   /// True if the loop may contain non-reversed interleaved groups with
1123   /// out-of-bounds accesses. We ensure we don't speculatively access memory
1124   /// out-of-bounds by executing at least one scalar epilogue iteration.
1125   bool RequiresScalarEpilogue;
1126 
1127   /// Holds the relationships between the members and the interleave group.
1128   DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
1129 
1130   /// Holds dependences among the memory accesses in the loop. It maps a source
1131   /// access to a set of dependent sink accesses.
1132   DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
1133 
1134   /// \brief The descriptor for a strided memory access.
1135   struct StrideDescriptor {
1136     StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
1137                      unsigned Align)
1138         : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
1139 
1140     StrideDescriptor() = default;
1141 
1142     // The access's stride. It is negative for a reverse access.
1143     int64_t Stride = 0;
1144     const SCEV *Scev = nullptr; // The scalar expression of this access
1145     uint64_t Size = 0;          // The size of the memory object.
1146     unsigned Align = 0;         // The alignment of this access.
1147   };
1148 
1149   /// \brief A type for holding instructions and their stride descriptors.
1150   typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;
1151 
1152   /// \brief Create a new interleave group with the given instruction \p Instr,
1153   /// stride \p Stride and alignment \p Align.
1154   ///
1155   /// \returns the newly created interleave group.
1156   InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
1157                                          unsigned Align) {
1158     assert(!InterleaveGroupMap.count(Instr) &&
1159            "Already in an interleaved access group");
1160     InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
1161     return InterleaveGroupMap[Instr];
1162   }
1163 
1164   /// \brief Release the group and remove all the relationships.
1165   void releaseGroup(InterleaveGroup *Group) {
1166     for (unsigned i = 0; i < Group->getFactor(); i++)
1167       if (Instruction *Member = Group->getMember(i))
1168         InterleaveGroupMap.erase(Member);
1169 
1170     delete Group;
1171   }
1172 
1173   /// \brief Collect all the accesses with a constant stride in program order.
1174   void collectConstStrideAccesses(
1175       MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1176       const ValueToValueMap &Strides);
1177 
1178   /// \brief Returns true if \p Stride is allowed in an interleaved group.
1179   static bool isStrided(int Stride) {
1180     unsigned Factor = std::abs(Stride);
1181     return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1182   }
1183 
1184   /// \brief Returns true if \p BB is a predicated block.
1185   bool isPredicated(BasicBlock *BB) const {
1186     return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
1187   }
1188 
1189   /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
1190   bool areDependencesValid() const {
1191     return LAI && LAI->getDepChecker().getDependences();
1192   }
1193 
1194   /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
1195   /// necessary, when constructing interleaved groups.
1196   ///
1197   /// \p A must precede \p B in program order. We return false if reordering is
1198   /// not necessary or is prevented because \p A and \p B may be dependent.
1199   bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
1200                                                  StrideEntry *B) const {
1201 
1202     // Code motion for interleaved accesses can potentially hoist strided loads
1203     // and sink strided stores. The code below checks the legality of the
1204     // following two conditions:
1205     //
1206     // 1. Potentially moving a strided load (B) before any store (A) that
1207     //    precedes B, or
1208     //
1209     // 2. Potentially moving a strided store (A) after any load or store (B)
1210     //    that A precedes.
1211     //
1212     // It's legal to reorder A and B if we know there isn't a dependence from A
1213     // to B. Note that this determination is conservative since some
1214     // dependences could potentially be reordered safely.
1215 
1216     // A is potentially the source of a dependence.
1217     auto *Src = A->first;
1218     auto SrcDes = A->second;
1219 
1220     // B is potentially the sink of a dependence.
1221     auto *Sink = B->first;
1222     auto SinkDes = B->second;
1223 
1224     // Code motion for interleaved accesses can't violate WAR dependences.
1225     // Thus, reordering is legal if the source isn't a write.
1226     if (!Src->mayWriteToMemory())
1227       return true;
1228 
1229     // At least one of the accesses must be strided.
1230     if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
1231       return true;
1232 
1233     // If dependence information is not available from LoopAccessInfo,
1234     // conservatively assume the instructions can't be reordered.
1235     if (!areDependencesValid())
1236       return false;
1237 
1238     // If we know there is a dependence from source to sink, assume the
1239     // instructions can't be reordered. Otherwise, reordering is legal.
1240     return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
1241   }
1242 
1243   /// \brief Collect the dependences from LoopAccessInfo.
1244   ///
1245   /// We process the dependences once during the interleaved access analysis to
1246   /// enable constant-time dependence queries.
1247   void collectDependences() {
1248     if (!areDependencesValid())
1249       return;
1250     auto *Deps = LAI->getDepChecker().getDependences();
1251     for (auto Dep : *Deps)
1252       Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
1253   }
1254 };
1255 
1256 /// Utility class for getting and setting loop vectorizer hints in the form
1257 /// of loop metadata.
1258 /// This class keeps a number of loop annotations locally (as member variables)
1259 /// and can, upon request, write them back as metadata on the loop. It will
1260 /// initially scan the loop for existing metadata, and will update the local
1261 /// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
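/// For example (illustrative), a loop annotated with
/// "#pragma clang loop vectorize_width(4)" carries metadata of the form:
///
///   br i1 %exitcond, label %exit, label %header, !llvm.loop !0
///   ...
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}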
1265 class LoopVectorizeHints {
1266   enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };
1267 
1268   /// Hint - associates name and validation with the hint value.
1269   struct Hint {
1270     const char *Name;
1271     unsigned Value; // This may have to change for non-numeric values.
1272     HintKind Kind;
1273 
1274     Hint(const char *Name, unsigned Value, HintKind Kind)
1275         : Name(Name), Value(Value), Kind(Kind) {}
1276 
1277     bool validate(unsigned Val) {
1278       switch (Kind) {
1279       case HK_WIDTH:
1280         return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
1281       case HK_UNROLL:
1282         return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
1283       case HK_FORCE:
1284         return (Val <= 1);
1285       }
1286       return false;
1287     }
1288   };
1289 
1290   /// Vectorization width.
1291   Hint Width;
1292   /// Vectorization interleave factor.
1293   Hint Interleave;
  /// Vectorization forced.
1295   Hint Force;
1296 
1297   /// Return the loop metadata prefix.
1298   static StringRef Prefix() { return "llvm.loop."; }
1299 
1300   /// True if there is any unsafe math in the loop.
1301   bool PotentiallyUnsafe;
1302 
1303 public:
1304   enum ForceKind {
1305     FK_Undefined = -1, ///< Not selected.
1306     FK_Disabled = 0,   ///< Forcing disabled.
1307     FK_Enabled = 1,    ///< Forcing enabled.
1308   };
1309 
1310   LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
1311                      OptimizationRemarkEmitter &ORE)
1312       : Width("vectorize.width", VectorizerParams::VectorizationFactor,
1313               HK_WIDTH),
1314         Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
1315         Force("vectorize.enable", FK_Undefined, HK_FORCE),
1316         PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
1317     // Populate values with existing loop metadata.
1318     getHintsFromMetadata();
1319 
1320     // force-vector-interleave overrides DisableInterleaving.
1321     if (VectorizerParams::isInterleaveForced())
1322       Interleave.Value = VectorizerParams::VectorizationInterleave;
1323 
1324     DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
1325           << "LV: Interleaving disabled by the pass manager\n");
1326   }
1327 
  /// Mark the loop L as already vectorized by setting the width and the
  /// interleave count to 1.
1329   void setAlreadyVectorized() {
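    // This writes hints of width 1 and interleave count 1; the loop metadata
    // will then contain (illustrative):
    //
    //   !{!"llvm.loop.vectorize.width", i32 1}
    //   !{!"llvm.loop.interleave.count", i32 1}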
1330     Width.Value = Interleave.Value = 1;
1331     Hint Hints[] = {Width, Interleave};
1332     writeHintsToMetadata(Hints);
1333   }
1334 
1335   bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
1336     if (getForce() == LoopVectorizeHints::FK_Disabled) {
1337       DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
1338       emitRemarkWithHints();
1339       return false;
1340     }
1341 
1342     if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
1343       DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
1344       emitRemarkWithHints();
1345       return false;
1346     }
1347 
1348     if (getWidth() == 1 && getInterleave() == 1) {
1349       // FIXME: Add a separate metadata to indicate when the loop has already
1350       // been vectorized instead of setting width and count to 1.
1351       DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
1352       // FIXME: Add interleave.disable metadata. This will allow
1353       // vectorize.disable to be used without disabling the pass and errors
1354       // to differentiate between disabled vectorization and a width of 1.
1355       ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
1356                                           "AllDisabled", L->getStartLoc(),
1357                                           L->getHeader())
1358                << "loop not vectorized: vectorization and interleaving are "
1359                   "explicitly disabled, or vectorize width and interleave "
1360                   "count are both set to 1");
1361       return false;
1362     }
1363 
1364     return true;
1365   }
1366 
1367   /// Dumps all the hint information.
1368   void emitRemarkWithHints() const {
1369     using namespace ore;
1370     if (Force.Value == LoopVectorizeHints::FK_Disabled)
1371       ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
1372                                         TheLoop->getStartLoc(),
1373                                         TheLoop->getHeader())
1374                << "loop not vectorized: vectorization is explicitly disabled");
1375     else {
1376       OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
1377                                  TheLoop->getStartLoc(), TheLoop->getHeader());
1378       R << "loop not vectorized";
1379       if (Force.Value == LoopVectorizeHints::FK_Enabled) {
1380         R << " (Force=" << NV("Force", true);
1381         if (Width.Value != 0)
1382           R << ", Vector Width=" << NV("VectorWidth", Width.Value);
1383         if (Interleave.Value != 0)
1384           R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
1385         R << ")";
1386       }
1387       ORE.emit(R);
1388     }
1389   }
1390 
1391   unsigned getWidth() const { return Width.Value; }
1392   unsigned getInterleave() const { return Interleave.Value; }
1393   enum ForceKind getForce() const { return (ForceKind)Force.Value; }
1394 
1395   /// \brief If hints are provided that force vectorization, use the AlwaysPrint
1396   /// pass name to force the frontend to print the diagnostic.
1397   const char *vectorizeAnalysisPassName() const {
1398     if (getWidth() == 1)
1399       return LV_NAME;
1400     if (getForce() == LoopVectorizeHints::FK_Disabled)
1401       return LV_NAME;
1402     if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
1403       return LV_NAME;
1404     return OptimizationRemarkAnalysis::AlwaysPrint;
1405   }
1406 
1407   bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
1411     // reordering floating-point operations will change the way round-off
1412     // error accumulates in the loop.
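    //
    // For example (illustrative), vectorizing a floating-point sum reduction
    // computes partial sums in a different association, and under IEEE 754
    // (a + b) + c is not guaranteed to equal a + (b + c).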
1413     return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
1414   }
1415 
1416   bool isPotentiallyUnsafe() const {
1417     // Avoid FP vectorization if the target is unsure about proper support.
1418     // This may be related to the SIMD unit in the target not handling
1419     // IEEE 754 FP ops properly, or bad single-to-double promotions.
1420     // Otherwise, a sequence of vectorized loops, even without reduction,
1421     // could lead to different end results on the destination vectors.
1422     return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
1423   }
1424 
1425   void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
1426 
1427 private:
1428   /// Find hints specified in the loop metadata and update local values.
1429   void getHintsFromMetadata() {
1430     MDNode *LoopID = TheLoop->getLoopID();
1431     if (!LoopID)
1432       return;
1433 
1434     // First operand should refer to the loop id itself.
1435     assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
1436     assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
1437 
1438     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1439       const MDString *S = nullptr;
1440       SmallVector<Metadata *, 4> Args;
1441 
      // The expected hint is either an MDString or an MDNode whose first
      // operand is an MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        // MD is known to be non-null here; only reject nodes without operands.
        if (MD->getNumOperands() == 0)
          continue;
1447         S = dyn_cast<MDString>(MD->getOperand(0));
1448         for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1449           Args.push_back(MD->getOperand(i));
1450       } else {
1451         S = dyn_cast<MDString>(LoopID->getOperand(i));
1452         assert(Args.size() == 0 && "too many arguments for MDString");
1453       }
1454 
1455       if (!S)
1456         continue;
1457 
      // Only hints with a single operand are supported; setHint checks the
      // loop metadata prefix and validates the value.
1459       StringRef Name = S->getString();
1460       if (Args.size() == 1)
1461         setHint(Name, Args[0]);
1462     }
1463   }
1464 
  /// Checks a string hint with one operand and sets the value if valid.
1466   void setHint(StringRef Name, Metadata *Arg) {
1467     if (!Name.startswith(Prefix()))
1468       return;
1469     Name = Name.substr(Prefix().size(), StringRef::npos);
1470 
1471     const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1472     if (!C)
1473       return;
1474     unsigned Val = C->getZExtValue();
1475 
1476     Hint *Hints[] = {&Width, &Interleave, &Force};
1477     for (auto H : Hints) {
1478       if (Name == H->Name) {
1479         if (H->validate(Val))
1480           H->Value = Val;
1481         else
1482           DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1483         break;
1484       }
1485     }
1486   }
1487 
  /// Create a new hint from a name/value pair.
1489   MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1490     LLVMContext &Context = TheLoop->getHeader()->getContext();
1491     Metadata *MDs[] = {MDString::get(Context, Name),
1492                        ConstantAsMetadata::get(
1493                            ConstantInt::get(Type::getInt32Ty(Context), V))};
1494     return MDNode::get(Context, MDs);
1495   }
1496 
1497   /// Matches metadata with hint name.
1498   bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1499     MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1500     if (!Name)
1501       return false;
1502 
1503     for (auto H : HintTypes)
1504       if (Name->getString().endswith(H.Name))
1505         return true;
1506     return false;
1507   }
1508 
1509   /// Sets current hints into loop metadata, keeping other values intact.
1510   void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1511     if (HintTypes.size() == 0)
1512       return;
1513 
    // Reserve the first element for the LoopID (set below).
1515     SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, keep any existing operands that are
    // not being updated.
1517     MDNode *LoopID = TheLoop->getLoopID();
1518     if (LoopID) {
1519       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1520         MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, drop the old value.
1522         if (!matchesHintMetadataName(Node, HintTypes))
1523           MDs.push_back(Node);
1524       }
1525     }
1526 
1527     // Now, add the missing hints.
1528     for (auto H : HintTypes)
1529       MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1530 
1531     // Replace current metadata node with new one.
1532     LLVMContext &Context = TheLoop->getHeader()->getContext();
1533     MDNode *NewLoopID = MDNode::get(Context, MDs);
1534     // Set operand 0 to refer to the loop id itself.
1535     NewLoopID->replaceOperandWith(0, NewLoopID);
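    // The node is now self-referential, e.g. (illustrative):
    //   !0 = distinct !{!0, !{!"llvm.loop.vectorize.width", i32 4}}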
1536 
1537     TheLoop->setLoopID(NewLoopID);
1538   }
1539 
1540   /// The loop these hints belong to.
1541   const Loop *TheLoop;
1542 
1543   /// Interface to emit optimization remarks.
1544   OptimizationRemarkEmitter &ORE;
1545 };
1546 
1547 static void emitMissedWarning(Function *F, Loop *L,
1548                               const LoopVectorizeHints &LH,
1549                               OptimizationRemarkEmitter *ORE) {
1550   LH.emitRemarkWithHints();
1551 
1552   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1553     if (LH.getWidth() != 1)
1554       ORE->emit(DiagnosticInfoOptimizationFailure(
1555                     DEBUG_TYPE, "FailedRequestedVectorization",
1556                     L->getStartLoc(), L->getHeader())
1557                 << "loop not vectorized: "
1558                 << "failed explicitly specified loop vectorization");
1559     else if (LH.getInterleave() != 1)
1560       ORE->emit(DiagnosticInfoOptimizationFailure(
1561                     DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
1562                     L->getHeader())
1563                 << "loop not interleaved: "
1564                 << "failed explicitly specified loop interleaving");
1565   }
1566 }
1567 
1568 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1569 /// to what vectorization factor.
1570 /// This class does not look at the profitability of vectorization, only the
1571 /// legality. This class has two main kinds of checks:
1572 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1573 ///   will change the order of memory accesses in a way that will change the
1574 ///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable and that all types are supported and
///   vectorizable. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variables and the different reduction variables.
1581 class LoopVectorizationLegality {
1582 public:
1583   LoopVectorizationLegality(
1584       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1585       TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1586       const TargetTransformInfo *TTI,
1587       std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1588       OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1589       LoopVectorizeHints *H)
1590       : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1591         GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1592         PrimaryInduction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1593         Requirements(R), Hints(H) {}
1594 
1595   /// ReductionList contains the reduction descriptors for all
1596   /// of the reductions that were found in the loop.
1597   typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1598 
1599   /// InductionList saves induction variables and maps them to the
1600   /// induction descriptor.
1601   typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1602 
1603   /// RecurrenceSet contains the phi nodes that are recurrences other than
1604   /// inductions and reductions.
1605   typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1606 
1607   /// Returns true if it is legal to vectorize this loop.
1608   /// This does not mean that it is profitable to vectorize this
1609   /// loop, only that it is legal to do so.
1610   bool canVectorize();
1611 
1612   /// Returns the primary induction variable.
1613   PHINode *getPrimaryInduction() { return PrimaryInduction; }
1614 
1615   /// Returns the reduction variables found in the loop.
1616   ReductionList *getReductionVars() { return &Reductions; }
1617 
1618   /// Returns the induction variables found in the loop.
1619   InductionList *getInductionVars() { return &Inductions; }
1620 
1621   /// Return the first-order recurrences found in the loop.
1622   RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1623 
1624   /// Returns the widest induction type.
1625   Type *getWidestInductionType() { return WidestIndTy; }
1626 
1627   /// Returns True if V is an induction variable in this loop.
1628   bool isInductionVariable(const Value *V);
1629 
1630   /// Returns True if PN is a reduction variable in this loop.
1631   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1632 
1633   /// Returns True if Phi is a first-order recurrence in this loop.
1634   bool isFirstOrderRecurrence(const PHINode *Phi);
1635 
1636   /// Return true if the block BB needs to be predicated in order for the loop
1637   /// to be vectorized.
1638   bool blockNeedsPredication(BasicBlock *BB);
1639 
  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
1643   /// This check allows us to vectorize A[idx] into a wide load/store.
1644   /// Returns:
1645   /// 0 - Stride is unknown or non-consecutive.
1646   /// 1 - Address is consecutive.
1647   /// -1 - Address is consecutive, and decreasing.
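  /// For example (illustrative): for A[i] with a unit-step induction variable
  /// i, this returns 1; for A[N - i] it returns -1; and for A[2 * i] it
  /// returns 0.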
1648   int isConsecutivePtr(Value *Ptr);
1649 
1650   /// Returns true if the value V is uniform within the loop.
1651   bool isUniform(Value *V);
1652 
  /// Returns the information that we collected about runtime memory checks.
1654   const RuntimePointerChecking *getRuntimePointerChecking() const {
1655     return LAI->getRuntimePointerChecking();
1656   }
1657 
1658   const LoopAccessInfo *getLAI() const { return LAI; }
1659 
1660   /// \brief Check if \p Instr belongs to any interleaved access group.
1661   bool isAccessInterleaved(Instruction *Instr) {
1662     return InterleaveInfo.isInterleaved(Instr);
1663   }
1664 
1665   /// \brief Return the maximum interleave factor of all interleaved groups.
1666   unsigned getMaxInterleaveFactor() const {
1667     return InterleaveInfo.getMaxInterleaveFactor();
1668   }
1669 
1670   /// \brief Get the interleaved access group that \p Instr belongs to.
1671   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1672     return InterleaveInfo.getInterleaveGroup(Instr);
1673   }
1674 
1675   /// \brief Returns true if an interleaved group requires a scalar iteration
1676   /// to handle accesses with gaps.
1677   bool requiresScalarEpilogue() const {
1678     return InterleaveInfo.requiresScalarEpilogue();
1679   }
1680 
1681   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1682 
1683   bool hasStride(Value *V) { return LAI->hasStride(V); }
1684 
1685   /// Returns true if the target machine supports masked store operation
1686   /// for the given \p DataType and kind of access to \p Ptr.
1687   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1688     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1689   }
1690   /// Returns true if the target machine supports masked load operation
1691   /// for the given \p DataType and kind of access to \p Ptr.
1692   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1693     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1694   }
1695   /// Returns true if the target machine supports masked scatter operation
1696   /// for the given \p DataType.
1697   bool isLegalMaskedScatter(Type *DataType) {
1698     return TTI->isLegalMaskedScatter(DataType);
1699   }
1700   /// Returns true if the target machine supports masked gather operation
1701   /// for the given \p DataType.
1702   bool isLegalMaskedGather(Type *DataType) {
1703     return TTI->isLegalMaskedGather(DataType);
1704   }
1705   /// Returns true if the target machine can represent \p V as a masked gather
1706   /// or scatter operation.
1707   bool isLegalGatherOrScatter(Value *V) {
1708     auto *LI = dyn_cast<LoadInst>(V);
1709     auto *SI = dyn_cast<StoreInst>(V);
1710     if (!LI && !SI)
1711       return false;
1712     auto *Ptr = getPointerOperand(V);
1713     auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1714     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1715   }
1716 
1717   /// Returns true if vector representation of the instruction \p I
1718   /// requires mask.
1719   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1720   unsigned getNumStores() const { return LAI->getNumStores(); }
1721   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1722   unsigned getNumPredStores() const { return NumPredStores; }
1723 
1724   /// Returns true if \p I is an instruction that will be scalarized with
1725   /// predication. Such instructions include conditional stores and
1726   /// instructions that may divide by zero.
1727   bool isScalarWithPredication(Instruction *I);
1728 
1729   /// Returns true if \p I is a memory instruction with consecutive memory
1730   /// access that can be widened.
1731   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1732 
1733 private:
1734   /// Check if a single basic block loop is vectorizable.
1735   /// At this point we know that this is a loop with a constant trip count
1736   /// and we only need to check individual instructions.
1737   bool canVectorizeInstrs();
1738 
  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
1743   bool canVectorizeMemory();
1744 
1745   /// Return true if we can vectorize this loop using the IF-conversion
1746   /// transformation.
1747   bool canVectorizeWithIfConvert();
1748 
1749   /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be
  /// dereferenceable, so we can read from them without faulting.
1752   bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1753 
1754   /// Updates the vectorization state by adding \p Phi to the inductions list.
1755   /// This can set \p Phi as the main induction of the loop if \p Phi is a
1756   /// better choice for the main induction than the existing one.
1757   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1758                        SmallPtrSetImpl<Value *> &AllowedExit);
1759 
1760   /// Create an analysis remark that explains why vectorization failed
1761   ///
1762   /// \p RemarkName is the identifier for the remark.  If \p I is passed it is
1763   /// an instruction that prevents vectorization.  Otherwise the loop is used
1764   /// for the location of the remark.  \return the remark object that can be
1765   /// streamed to.
1766   OptimizationRemarkAnalysis
1767   createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1768     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1769                                   RemarkName, TheLoop, I);
1770   }
1771 
  /// \brief If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
1774   const ValueToValueMap *getSymbolicStrides() {
1775     // FIXME: Currently, the set of symbolic strides is sometimes queried before
1776     // it's collected.  This happens from canVectorizeWithIfConvert, when the
1777     // pointer is checked to reference consecutive elements suitable for a
1778     // masked access.
1779     return LAI ? &LAI->getSymbolicStrides() : nullptr;
1780   }
1781 
1782   unsigned NumPredStores;
1783 
1784   /// The loop that we evaluate.
1785   Loop *TheLoop;
1786   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1787   /// Applies dynamic knowledge to simplify SCEV expressions in the context
1788   /// of existing SCEV assumptions. The analysis will also add a minimal set
1789   /// of new predicates if this is required to enable vectorization and
1790   /// unrolling.
1791   PredicatedScalarEvolution &PSE;
1792   /// Target Library Info.
1793   TargetLibraryInfo *TLI;
  /// Target Transform Info.
1795   const TargetTransformInfo *TTI;
1796   /// Dominator Tree.
1797   DominatorTree *DT;
1798   // LoopAccess analysis.
1799   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1800   // And the loop-accesses info corresponding to this loop.  This pointer is
1801   // null until canVectorizeMemory sets it up.
1802   const LoopAccessInfo *LAI;
1803   /// Interface to emit optimization remarks.
1804   OptimizationRemarkEmitter *ORE;
1805 
1806   /// The interleave access information contains groups of interleaved accesses
1807   /// with the same stride and close to each other.
1808   InterleavedAccessInfo InterleaveInfo;
1809 
1810   //  ---  vectorization state --- //
1811 
1812   /// Holds the primary induction variable. This is the counter of the
1813   /// loop.
1814   PHINode *PrimaryInduction;
1815   /// Holds the reduction variables.
1816   ReductionList Reductions;
1817   /// Holds all of the induction variables that we found in the loop.
1818   /// Notice that inductions don't need to start at zero and that induction
1819   /// variables can be pointers.
1820   InductionList Inductions;
1821   /// Holds the phi nodes that are first-order recurrences.
1822   RecurrenceSet FirstOrderRecurrences;
1823   /// Holds the widest induction type encountered.
1824   Type *WidestIndTy;
1825 
1826   /// Allowed outside users. This holds the induction and reduction
1827   /// vars which can be accessed from outside the loop.
1828   SmallPtrSet<Value *, 4> AllowedExit;
1829 
1830   /// Can we assume the absence of NaNs.
1831   bool HasFunNoNaNAttr;
1832 
1833   /// Vectorization requirements that will go through late-evaluation.
1834   LoopVectorizationRequirements *Requirements;
1835 
1836   /// Used to emit an analysis of any legality issues.
1837   LoopVectorizeHints *Hints;
1838 
1839   /// While vectorizing these instructions we have to generate a
1840   /// call to the appropriate masked intrinsic
1841   SmallPtrSet<const Instruction *, 8> MaskedOp;
1842 };
1843 
1844 /// LoopVectorizationCostModel - estimates the expected speedups due to
1845 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1848 /// expected speedup/slowdowns due to the supported instruction set. We use the
1849 /// TargetTransformInfo to query the different backends for the cost of
1850 /// different operations.
1851 class LoopVectorizationCostModel {
1852 public:
1853   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1854                              LoopInfo *LI, LoopVectorizationLegality *Legal,
1855                              const TargetTransformInfo &TTI,
1856                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1857                              AssumptionCache *AC,
1858                              OptimizationRemarkEmitter *ORE, const Function *F,
1859                              const LoopVectorizeHints *Hints)
1860       : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1861         AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}
1862 
1863   /// \return An upper bound for the vectorization factor, or None if
1864   /// vectorization should be avoided up front.
1865   Optional<unsigned> computeMaxVF(bool OptForSize);
1866 
1867   /// Information about vectorization costs
1868   struct VectorizationFactor {
1869     unsigned Width; // Vector width with best cost
1870     unsigned Cost;  // Cost of the loop with that width
1871   };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to \p MaxVF. A user-specified
  /// VF is handled separately by selectUserVectorizationFactor.
1876   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1877 
1878   /// Setup cost-based decisions for user vectorization factor.
1879   void selectUserVectorizationFactor(unsigned UserVF) {
1880     collectUniformsAndScalars(UserVF);
1881     collectInstsToScalarize(UserVF);
1882   }
1883 
  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar, such as
  /// 64-bit loop indices.
1887   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1888 
1889   /// \return The desired interleave count.
1890   /// If interleave count has been specified by metadata it will be returned.
1891   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1892   /// are the selected vectorization factor and the cost of the selected VF.
1893   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1894                                  unsigned LoopCost);
1895 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
  /// This function makes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1903   void setCostBasedWideningDecision(unsigned VF);
1904 
1905   /// \brief A struct that represents some properties of the register usage
1906   /// of a loop.
1907   struct RegisterUsage {
1908     /// Holds the number of loop invariant values that are used in the loop.
1909     unsigned LoopInvariantRegs;
1910     /// Holds the maximum number of concurrent live intervals in the loop.
1911     unsigned MaxLocalUsers;
1912     /// Holds the number of instructions in the loop.
1913     unsigned NumInstructions;
1914   };
1915 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1918   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1919 
1920   /// Collect values we want to ignore in the cost model.
1921   void collectValuesToIgnore();
1922 
1923   /// \returns The smallest bitwidth each instruction can be represented with.
1924   /// The vector equivalents of these instructions should be truncated to this
1925   /// type.
1926   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1927     return MinBWs;
1928   }
1929 
1930   /// \returns True if it is more profitable to scalarize instruction \p I for
1931   /// vectorization factor \p VF.
1932   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1933     auto Scalars = InstsToScalarize.find(VF);
1934     assert(Scalars != InstsToScalarize.end() &&
1935            "VF not yet analyzed for scalarization profitability");
1936     return Scalars->second.count(I);
1937   }
1938 
1939   /// Returns true if \p I is known to be uniform after vectorization.
1940   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1941     if (VF == 1)
1942       return true;
1943     assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1944     auto UniformsPerVF = Uniforms.find(VF);
1945     return UniformsPerVF->second.count(I);
1946   }
1947 
1948   /// Returns true if \p I is known to be scalar after vectorization.
1949   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1950     if (VF == 1)
1951       return true;
1952     assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1953     auto ScalarsPerVF = Scalars.find(VF);
1954     return ScalarsPerVF->second.count(I);
1955   }
1956 
1957   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1958   /// for vectorization factor \p VF.
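  /// For example (illustrative), an i32 add whose inputs are known (via
  /// demanded-bits analysis) to fit in 8 bits may be performed on <VF x i8>
  /// vectors instead of <VF x i32>.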
1959   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1960     return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1961            !isScalarAfterVectorization(I, VF);
1962   }
1963 
1964   /// Decision that was taken during cost calculation for memory instruction.
1965   enum InstWidening {
1966     CM_Unknown,
1967     CM_Widen,
1968     CM_Interleave,
1969     CM_GatherScatter,
1970     CM_Scalarize
1971   };
1972 
1973   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1974   /// instruction \p I and vector width \p VF.
1975   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1976                            unsigned Cost) {
1977     assert(VF >= 2 && "Expected VF >=2");
1978     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1979   }
1980 
1981   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1982   /// interleaving group \p Grp and vector width \p VF.
1983   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1984                            InstWidening W, unsigned Cost) {
1985     assert(VF >= 2 && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group,
    // but assign the cost to only one instruction (the insert position).
1988     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1989       if (auto *I = Grp->getMember(i)) {
1990         if (Grp->getInsertPos() == I)
1991           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1992         else
1993           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1994       }
1995     }
1996   }
1997 
1998   /// Return the cost model decision for the given instruction \p I and vector
1999   /// width \p VF. Return CM_Unknown if this instruction did not pass
2000   /// through the cost modeling.
2001   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
2002     assert(VF >= 2 && "Expected VF >=2");
2003     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
2004     auto Itr = WideningDecisions.find(InstOnVF);
2005     if (Itr == WideningDecisions.end())
2006       return CM_Unknown;
2007     return Itr->second.first;
2008   }
2009 
2010   /// Return the vectorization cost for the given instruction \p I and vector
2011   /// width \p VF.
2012   unsigned getWideningCost(Instruction *I, unsigned VF) {
2013     assert(VF >= 2 && "Expected VF >=2");
2014     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
2015     assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
2016     return WideningDecisions[InstOnVF].second;
2017   }
2018 
2019   /// Return True if instruction \p I is an optimizable truncate whose operand
2020   /// is an induction variable. Such a truncate will be removed by adding a new
2021   /// induction variable with the destination type.
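  /// For example (illustrative), given an i64 induction variable %iv,
  ///
  ///   %t = trunc i64 %iv to i32
  ///
  /// can be replaced by a new i32 induction variable with the same start and
  /// step, making the truncate itself dead.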
2022   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
2023 
2024     // If the instruction is not a truncate, return false.
2025     auto *Trunc = dyn_cast<TruncInst>(I);
2026     if (!Trunc)
2027       return false;
2028 
2029     // Get the source and destination types of the truncate.
2030     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
2031     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
2032 
2033     // If the truncate is free for the given types, return false. Replacing a
2034     // free truncate with an induction variable would add an induction variable
2035     // update instruction to each iteration of the loop. We exclude from this
2036     // check the primary induction variable since it will need an update
2037     // instruction regardless.
2038     Value *Op = Trunc->getOperand(0);
2039     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
2040       return false;
2041 
2042     // If the truncated value is not an induction variable, return false.
2043     return Legal->isInductionVariable(Op);
2044   }
2045 
2046 private:
2047   /// \return An upper bound for the vectorization factor, larger than zero.
2048   /// One is returned if vectorization should best be avoided due to cost.
2049   unsigned computeFeasibleMaxVF(bool OptForSize);
2050 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;
2059 
2060   /// Returns the expected execution cost. The unit of the cost does
2061   /// not matter because we use the 'cost' units to compare different
2062   /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor.
2064   VectorizationCostTy expectedCost(unsigned VF);
2065 
2066   /// Returns the execution time cost of an instruction for a given vector
2067   /// width. Vector width of one means scalar.
2068   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
2069 
2070   /// The cost-computation logic from getInstructionCost which provides
2071   /// the vector type as an output parameter.
2072   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
2073 
2074   /// Calculate vectorization cost of memory instruction \p I.
2075   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
2076 
2077   /// The cost computation for scalarized memory instruction.
2078   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
2079 
2080   /// The cost computation for interleaving group of memory instructions.
2081   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
2082 
2083   /// The cost computation for Gather/Scatter instruction.
2084   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
2085 
2086   /// The cost computation for widening instruction \p I with consecutive
2087   /// memory access.
2088   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
2089 
2090   /// The cost calculation for Load instruction \p I with uniform pointer -
2091   /// scalar load + broadcast.
2092   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
2093 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
2096   bool isConsecutiveLoadOrStore(Instruction *I);
2097 
2098   /// Create an analysis remark that explains why vectorization failed
2099   ///
2100   /// \p RemarkName is the identifier for the remark.  \return the remark object
2101   /// that can be streamed to.
2102   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
2103     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
2104                                   RemarkName, TheLoop);
2105   }
2106 
2107   /// Map of scalar integer values to the smallest bitwidth they can be legally
2108   /// represented as. The vector equivalents of these values should be truncated
2109   /// to this type.
2110   MapVector<Instruction *, uint64_t> MinBWs;
2111 
2112   /// A type representing the costs for instructions if they were to be
2113   /// scalarized rather than vectorized. The entries are Instruction-Cost
2114   /// pairs.
2115   typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;
2116 
2117   /// A map holding scalar costs for different vectorization factors. The
2118   /// presence of a cost for an instruction in the mapping indicates that the
2119   /// instruction will be scalarized when vectorizing with the associated
2120   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
2121   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
2122 
2123   /// Holds the instructions known to be uniform after vectorization.
2124   /// The data is collected per VF.
2125   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
2126 
2127   /// Holds the instructions known to be scalar after vectorization.
2128   /// The data is collected per VF.
2129   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
2130 
2131   /// Returns the expected difference in cost from scalarizing the expression
2132   /// feeding a predicated instruction \p PredInst. The instructions to
2133   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
2134   /// non-negative return value implies the expression will be scalarized.
2135   /// Currently, only single-use chains are considered for scalarization.
2136   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
2137                               unsigned VF);
2138 
2139   /// Collects the instructions to scalarize for each predicated instruction in
2140   /// the loop.
2141   void collectInstsToScalarize(unsigned VF);
2142 
2143   /// Collect the instructions that are uniform after vectorization. An
2144   /// instruction is uniform if we represent it with a single scalar value in
2145   /// the vectorized loop corresponding to each vector iteration. Examples of
2146   /// uniform instructions include pointer operands of consecutive or
2147   /// interleaved memory accesses. Note that although uniformity implies an
2148   /// instruction will be scalar, the reverse is not true. In general, a
2149   /// scalarized instruction will be represented by VF scalar values in the
2150   /// vectorized loop, each corresponding to an iteration of the original
2151   /// scalar loop.
2152   void collectLoopUniforms(unsigned VF);
2153 
2154   /// Collect the instructions that are scalar after vectorization. An
2155   /// instruction is scalar if it is known to be uniform or will be scalarized
2156   /// during vectorization. Non-uniform scalarized instructions will be
2157   /// represented by VF values in the vectorized loop, each corresponding to an
2158   /// iteration of the original scalar loop.
2159   void collectLoopScalars(unsigned VF);
2160 
2161   /// Collect Uniform and Scalar values for the given \p VF.
2162   /// The sets depend on CM decision for Load/Store instructions
2163   /// that may be vectorized as interleave, gather-scatter or scalarized.
2164   void collectUniformsAndScalars(unsigned VF) {
2165     // Do the analysis once.
2166     if (VF == 1 || Uniforms.count(VF))
2167       return;
2168     setCostBasedWideningDecision(VF);
2169     collectLoopUniforms(VF);
2170     collectLoopScalars(VF);
2171   }
2172 
2173   /// Keeps cost model vectorization decision and cost for instructions.
2174   /// Right now it is used for memory instructions only.
2175   typedef DenseMap<std::pair<Instruction *, unsigned>,
2176                    std::pair<InstWidening, unsigned>>
2177       DecisionList;
2178 
2179   DecisionList WideningDecisions;
2180 
2181 public:
2182   /// The loop that we evaluate.
2183   Loop *TheLoop;
2184   /// Predicated scalar evolution analysis.
2185   PredicatedScalarEvolution &PSE;
2186   /// Loop Info analysis.
2187   LoopInfo *LI;
2188   /// Vectorization legality.
2189   LoopVectorizationLegality *Legal;
2190   /// Vector target information.
2191   const TargetTransformInfo &TTI;
2192   /// Target Library Info.
2193   const TargetLibraryInfo *TLI;
2194   /// Demanded bits analysis.
2195   DemandedBits *DB;
2196   /// Assumption cache.
2197   AssumptionCache *AC;
2198   /// Interface to emit optimization remarks.
2199   OptimizationRemarkEmitter *ORE;
2200 
2201   const Function *TheFunction;
2202   /// Loop Vectorize Hint.
2203   const LoopVectorizeHints *Hints;
2204   /// Values to ignore in the cost model.
2205   SmallPtrSet<const Value *, 16> ValuesToIgnore;
2206   /// Values to ignore in the cost model when VF > 1.
2207   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
2208 };
2209 
2210 /// LoopVectorizationPlanner - drives the vectorization process after having
2211 /// passed Legality checks.
2212 class LoopVectorizationPlanner {
2213 public:
2214   LoopVectorizationPlanner(LoopVectorizationCostModel &CM) : CM(CM) {}
2215 
2216   ~LoopVectorizationPlanner() {}
2217 
2218   /// Plan how to best vectorize, return the best VF and its cost.
2219   LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
2220                                                        unsigned UserVF);
2221 
2222 private:
  /// The profitability analysis.
2224   LoopVectorizationCostModel &CM;
2225 };
2226 
/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the
/// cost model. Once vectorization has been determined to be possible and
/// profitable, the requirements can be verified by looking for metadata or
/// compiler options.
/// For example, some loops require FP commutativity, which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// a non-expert user can follow.
2239 class LoopVectorizationRequirements {
2240 public:
2241   LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
2242       : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
2243 
2244   void addUnsafeAlgebraInst(Instruction *I) {
2245     // First unsafe algebra instruction.
2246     if (!UnsafeAlgebraInst)
2247       UnsafeAlgebraInst = I;
2248   }
2249 
2250   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2251 
2252   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2253     const char *PassName = Hints.vectorizeAnalysisPassName();
2254     bool Failed = false;
2255     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
2256       ORE.emit(
2257           OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
2258                                               UnsafeAlgebraInst->getDebugLoc(),
2259                                               UnsafeAlgebraInst->getParent())
2260           << "loop not vectorized: cannot prove it is safe to reorder "
2261              "floating-point operations");
2262       Failed = true;
2263     }
2264 
2265     // Test if runtime memcheck thresholds are exceeded.
2266     bool PragmaThresholdReached =
2267         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
2268     bool ThresholdReached =
2269         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
2270     if ((ThresholdReached && !Hints.allowReordering()) ||
2271         PragmaThresholdReached) {
2272       ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
2273                                                   L->getStartLoc(),
2274                                                   L->getHeader())
2275                << "loop not vectorized: cannot prove it is safe to reorder "
2276                   "memory operations");
2277       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
2278       Failed = true;
2279     }
2280 
2281     return Failed;
2282   }
2283 
2284 private:
2285   unsigned NumRuntimePointerChecks;
2286   Instruction *UnsafeAlgebraInst;
2287 
2288   /// Interface to emit optimization remarks.
2289   OptimizationRemarkEmitter &ORE;
2290 };
2291 
2292 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
2293   if (L.empty()) {
2294     if (!hasCyclesInLoopBody(L))
2295       V.push_back(&L);
2296     return;
2297   }
2298   for (Loop *InnerL : L)
2299     addAcyclicInnerLoop(*InnerL, V);
2300 }
2301 
2302 /// The LoopVectorize Pass.
2303 struct LoopVectorize : public FunctionPass {
2304   /// Pass identification, replacement for typeid
2305   static char ID;
2306 
2307   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
2308       : FunctionPass(ID) {
2309     Impl.DisableUnrolling = NoUnrolling;
2310     Impl.AlwaysVectorize = AlwaysVectorize;
2311     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2312   }
2313 
2314   LoopVectorizePass Impl;
2315 
2316   bool runOnFunction(Function &F) override {
2317     if (skipFunction(F))
2318       return false;
2319 
2320     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2321     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2322     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2323     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2324     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2325     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2326     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
2327     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2328     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2329     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2330     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2331     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2332 
2333     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2334         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2335 
2336     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2337                         GetLAA, *ORE);
2338   }
2339 
2340   void getAnalysisUsage(AnalysisUsage &AU) const override {
2341     AU.addRequired<AssumptionCacheTracker>();
2342     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2343     AU.addRequired<DominatorTreeWrapperPass>();
2344     AU.addRequired<LoopInfoWrapperPass>();
2345     AU.addRequired<ScalarEvolutionWrapperPass>();
2346     AU.addRequired<TargetTransformInfoWrapperPass>();
2347     AU.addRequired<AAResultsWrapperPass>();
2348     AU.addRequired<LoopAccessLegacyAnalysis>();
2349     AU.addRequired<DemandedBitsWrapperPass>();
2350     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2351     AU.addPreserved<LoopInfoWrapperPass>();
2352     AU.addPreserved<DominatorTreeWrapperPass>();
2353     AU.addPreserved<BasicAAWrapperPass>();
2354     AU.addPreserved<GlobalsAAWrapperPass>();
2355   }
2356 };
2357 
2358 } // end anonymous namespace
2359 
2360 //===----------------------------------------------------------------------===//
2361 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2362 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2363 //===----------------------------------------------------------------------===//
2364 
2365 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2366   // We need to place the broadcast of invariant variables outside the loop.
2367   Instruction *Instr = dyn_cast<Instruction>(V);
2368   bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
2369   bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2370 
2371   // Place the code for broadcasting invariant variables in the new preheader.
2372   IRBuilder<>::InsertPointGuard Guard(Builder);
2373   if (Invariant)
2374     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2375 
2376   // Broadcast the scalar into all locations in the vector.
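  // With VF = 4 and an i32 operand, CreateVectorSplat typically expands to
  // (illustrative):
  //
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> undef, <4 x i32> zeroinitializer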
2377   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2378 
2379   return Shuf;
2380 }
2381 
2382 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2383     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
2384   Value *Start = II.getStartValue();
2385 
  // Construct the initial value of the vector IV in the vector loop preheader.
2387   auto CurrIP = Builder.saveIP();
2388   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2389   if (isa<TruncInst>(EntryVal)) {
2390     assert(Start->getType()->isIntegerTy() &&
2391            "Truncation requires an integer type");
2392     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2393     Step = Builder.CreateTrunc(Step, TruncType);
2394     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2395   }
2396   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2397   Value *SteppedStart =
2398       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2399 
2400   // We create vector phi nodes for both integer and floating-point induction
2401   // variables. Here, we determine the kind of arithmetic we will perform.
2402   Instruction::BinaryOps AddOp;
2403   Instruction::BinaryOps MulOp;
2404   if (Step->getType()->isIntegerTy()) {
2405     AddOp = Instruction::Add;
2406     MulOp = Instruction::Mul;
2407   } else {
2408     AddOp = II.getInductionOpcode();
2409     MulOp = Instruction::FMul;
2410   }
2411 
2412   // Multiply the vectorization factor by the step using integer or
2413   // floating-point arithmetic as appropriate.
2414   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
2415   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2416 
2417   // Create a vector splat to use in the induction update.
2418   //
2419   // FIXME: If the step is non-constant, we create the vector splat with
2420   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2421   //        handle a constant vector splat.
2422   Value *SplatVF = isa<Constant>(Mul)
2423                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2424                        : Builder.CreateVectorSplat(VF, Mul);
2425   Builder.restoreIP(CurrIP);
2426 
2427   // We may need to add the step a number of times, depending on the unroll
2428   // factor. The last of those goes into the PHI.
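  //
  // For example (illustrative), with VF = 4, UF = 2, and a step of 1:
  //
  //   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %ph ], ...
  //   %step.add = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  //   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>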
2429   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2430                                     &*LoopVectorBody->getFirstInsertionPt());
2431   Instruction *LastInduction = VecInd;
2432   VectorParts Entry(UF);
2433   for (unsigned Part = 0; Part < UF; ++Part) {
2434     Entry[Part] = LastInduction;
2435     LastInduction = cast<Instruction>(addFastMathFlag(
2436         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2437   }
2438   VectorLoopValueMap.initVector(EntryVal, Entry);
2439   if (isa<TruncInst>(EntryVal))
2440     addMetadata(Entry, EntryVal);
2441 
2442   // Move the last step to the end of the latch block. This ensures consistent
2443   // placement of all induction updates.
2444   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2445   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2446   auto *ICmp = cast<Instruction>(Br->getCondition());
2447   LastInduction->moveBefore(ICmp);
2448   LastInduction->setName("vec.ind.next");
2449 
2450   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2451   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2452 }
2453 
2454 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2455   return Cost->isScalarAfterVectorization(I, VF) ||
2456          Cost->isProfitableToScalarize(I, VF);
2457 }
2458 
2459 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2460   if (shouldScalarizeInstruction(IV))
2461     return true;
2462   auto isScalarInst = [&](User *U) -> bool {
2463     auto *I = cast<Instruction>(U);
2464     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2465   };
2466   return any_of(IV->users(), isScalarInst);
2467 }
2468 
2469 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
2470 
2471   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2472          "Primary induction variable must have an integer type");
2473 
2474   auto II = Legal->getInductionVars()->find(IV);
2475   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
2476 
2477   auto ID = II->second;
2478   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2479 
2480   // The scalar value to broadcast. This will be derived from the canonical
2481   // induction variable.
2482   Value *ScalarIV = nullptr;
2483 
2484   // The value from the original loop to which we are mapping the new induction
2485   // variable.
2486   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2487 
2488   // True if we have vectorized the induction variable.
2489   auto VectorizedIV = false;
2490 
2491   // Determine if we want a scalar version of the induction variable. This is
2492   // true if the induction variable itself is not widened, or if it has at
2493   // least one user in the loop that is not widened.
2494   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
2495 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2498   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
2499          "Induction step should be loop invariant");
2500   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2501   Value *Step = nullptr;
2502   if (PSE.getSE()->isSCEVable(IV->getType())) {
2503     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2504     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
2505                              LoopVectorPreHeader->getTerminator());
2506   } else {
2507     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
2508   }
2509 
2510   // Try to create a new independent vector induction variable. If we can't
2511   // create the phi node, we will splat the scalar induction variable in each
2512   // loop iteration.
2513   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
2514     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2515     VectorizedIV = true;
2516   }
2517 
2518   // If we haven't yet vectorized the induction variable, or if we will create
2519   // a scalar one, we need to define the scalar induction variable and step
2520   // values. If we were given a truncation type, truncate the canonical
2521   // induction variable and step. Otherwise, derive these values from the
2522   // induction descriptor.
2523   if (!VectorizedIV || NeedsScalarIV) {
2524     if (Trunc) {
2525       auto *TruncType = cast<IntegerType>(Trunc->getType());
2526       assert(Step->getType()->isIntegerTy() &&
2527              "Truncation requires an integer step");
2528       ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType);
2529       Step = Builder.CreateTrunc(Step, TruncType);
2530     } else {
2531       ScalarIV = Induction;
2532       if (IV != OldInduction) {
2533         ScalarIV = IV->getType()->isIntegerTy()
2534                        ? Builder.CreateSExtOrTrunc(ScalarIV, IV->getType())
2535                        : Builder.CreateCast(Instruction::SIToFP, Induction,
2536                                             IV->getType());
2537         ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
2538         ScalarIV->setName("offset.idx");
2539       }
2540     }
2541   }
2542 
2543   // If we haven't yet vectorized the induction variable, splat the scalar
2544   // induction variable, and build the necessary step vectors.
2545   if (!VectorizedIV) {
2546     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2547     VectorParts Entry(UF);
2548     for (unsigned Part = 0; Part < UF; ++Part)
2549       Entry[Part] =
2550           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
2551     VectorLoopValueMap.initVector(EntryVal, Entry);
2552     if (Trunc)
2553       addMetadata(Entry, Trunc);
2554   }
2555 
2556   // If an induction variable is only used for counting loop iterations or
2557   // calculating addresses, it doesn't need to be widened. Create scalar steps
2558   // that can be used by instructions we will later scalarize. Note that the
2559   // addition of the scalar steps will not increase the number of instructions
2560   // in the loop in the common case prior to InstCombine. We will be trading
2561   // one vector extract for each scalar step.
2562   if (NeedsScalarIV)
2563     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
2564 }
2565 
2566 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2567                                           Instruction::BinaryOps BinOp) {
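  // For example (a sketch), with VLen = 4, StartIdx = 8, and an integer step
  // S, the result is Val + <8*S, 9*S, 10*S, 11*S>.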
2568   // Create and check the types.
2569   assert(Val->getType()->isVectorTy() && "Must be a vector");
2570   int VLen = Val->getType()->getVectorNumElements();
2571 
2572   Type *STy = Val->getType()->getScalarType();
2573   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2574          "Induction Step must be an integer or FP");
2575   assert(Step->getType() == STy && "Step has wrong type");
2576 
2577   SmallVector<Constant *, 8> Indices;
2578 
2579   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers <StartIdx, StartIdx + 1, ...,
    // StartIdx + VLen - 1>.
2581     for (int i = 0; i < VLen; ++i)
2582       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2583 
2584     // Add the consecutive indices to the vector value.
2585     Constant *Cv = ConstantVector::get(Indices);
2586     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2587     Step = Builder.CreateVectorSplat(VLen, Step);
2588     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    //        flags, which can be copied from the original scalar operations.
2591     Step = Builder.CreateMul(Cv, Step);
2592     return Builder.CreateAdd(Val, Step, "induction");
2593   }
2594 
2595   // Floating point induction.
2596   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2597          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers <StartIdx, StartIdx + 1, ...,
  // StartIdx + VLen - 1>.
2599   for (int i = 0; i < VLen; ++i)
2600     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2601 
2602   // Add the consecutive indices to the vector value.
2603   Constant *Cv = ConstantVector::get(Indices);
2604 
2605   Step = Builder.CreateVectorSplat(VLen, Step);
2606 
2607   // Floating point operations had to be 'fast' to enable the induction.
2608   FastMathFlags Flags;
2609   Flags.setUnsafeAlgebra();
2610 
2611   Value *MulOp = Builder.CreateFMul(Cv, Step);
  // MulOp may have been constant-folded, so only set the fast-math flags if
  // it is an instruction.
  if (isa<Instruction>(MulOp))
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2615 
2616   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2617   if (isa<Instruction>(BOp))
2618     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2619   return BOp;
2620 }
2621 
2622 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2623                                            Value *EntryVal,
2624                                            const InductionDescriptor &ID) {
2625 
2626   // We shouldn't have to build scalar steps if we aren't vectorizing.
2627   assert(VF > 1 && "VF should be greater than one");
2628 
  // Get the value type and ensure it and the step have the same type.
2630   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2631   assert(ScalarIVTy == Step->getType() &&
2632          "Val and Step should have the same type");
2633 
2634   // We build scalar steps for both integer and floating-point induction
2635   // variables. Here, we determine the kind of arithmetic we will perform.
2636   Instruction::BinaryOps AddOp;
2637   Instruction::BinaryOps MulOp;
2638   if (ScalarIVTy->isIntegerTy()) {
2639     AddOp = Instruction::Add;
2640     MulOp = Instruction::Mul;
2641   } else {
2642     AddOp = ID.getInductionOpcode();
2643     MulOp = Instruction::FMul;
2644   }
2645 
2646   // Determine the number of scalars we need to generate for each unroll
2647   // iteration. If EntryVal is uniform, we only need to generate the first
2648   // lane. Otherwise, we generate all VF values.
2649   unsigned Lanes =
2650     Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 : VF;
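
  // For example (a sketch), with UF = 2, VF = 4, a scalar IV %i, and step S,
  // the non-uniform case produces the eight scalars
  //   %i + 0*S, %i + 1*S, ..., %i + 7*S,
  // stored as Entry[Part][Lane] at index VF * Part + Lane.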
2651 
2652   // Compute the scalar steps and save the results in VectorLoopValueMap.
2653   ScalarParts Entry(UF);
2654   for (unsigned Part = 0; Part < UF; ++Part) {
2655     Entry[Part].resize(VF);
2656     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2657       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2658       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2659       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2660       Entry[Part][Lane] = Add;
2661     }
2662   }
2663   VectorLoopValueMap.initScalar(EntryVal, Entry);
2664 }
2665 
2666 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
2667 
  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();
2670 
2671   int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
2672   if (Stride == 1 || Stride == -1)
2673     return Stride;
2674   return 0;
2675 }
2676 
2677 bool LoopVectorizationLegality::isUniform(Value *V) {
2678   return LAI->isUniform(V);
2679 }
2680 
2681 const InnerLoopVectorizer::VectorParts &
2682 InnerLoopVectorizer::getVectorValue(Value *V) {
2683   assert(V != Induction && "The new induction variable should not be used.");
2684   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2685   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2686 
  // If this value is a stride that has been replaced by one, substitute the
  // constant one here.
2688   if (Legal->hasStride(V))
2689     V = ConstantInt::get(V->getType(), 1);
2690 
2691   // If we have this scalar in the map, return it.
2692   if (VectorLoopValueMap.hasVector(V))
2693     return VectorLoopValueMap.VectorMapStorage[V];
2694 
2695   // If the value has not been vectorized, check if it has been scalarized
2696   // instead. If it has been scalarized, and we actually need the value in
2697   // vector form, we will construct the vector values on demand.
2698   if (VectorLoopValueMap.hasScalar(V)) {
2699 
2700     // Initialize a new vector map entry.
2701     VectorParts Entry(UF);
2702 
2703     // If we've scalarized a value, that value should be an instruction.
2704     auto *I = cast<Instruction>(V);
2705 
2706     // If we aren't vectorizing, we can just copy the scalar map values over to
2707     // the vector map.
2708     if (VF == 1) {
2709       for (unsigned Part = 0; Part < UF; ++Part)
2710         Entry[Part] = getScalarValue(V, Part, 0);
2711       return VectorLoopValueMap.initVector(V, Entry);
2712     }
2713 
2714     // Get the last scalar instruction we generated for V. If the value is
2715     // known to be uniform after vectorization, this corresponds to lane zero
2716     // of the last unroll iteration. Otherwise, the last instruction is the one
2717     // we created for the last vector lane of the last unroll iteration.
2718     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2719     auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane));
2720 
2721     // Set the insert point after the last scalarized instruction. This ensures
2722     // the insertelement sequence will directly follow the scalar definitions.
2723     auto OldIP = Builder.saveIP();
2724     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2725     Builder.SetInsertPoint(&*NewIP);
2726 
2727     // However, if we are vectorizing, we need to construct the vector values.
2728     // If the value is known to be uniform after vectorization, we can just
2729     // broadcast the scalar value corresponding to lane zero for each unroll
2730     // iteration. Otherwise, we construct the vector values using insertelement
2731     // instructions. Since the resulting vectors are stored in
2732     // VectorLoopValueMap, we will only generate the insertelements once.
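    //
    // For example (a sketch), with VF = 4 and a non-uniform value, each part
    // is assembled as:
    //   %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
    //   %v1 = insertelement <4 x i32> %v0,   i32 %s1, i32 1
    //   %v2 = insertelement <4 x i32> %v1,   i32 %s2, i32 2
    //   %v3 = insertelement <4 x i32> %v2,   i32 %s3, i32 3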
2733     for (unsigned Part = 0; Part < UF; ++Part) {
2734       Value *VectorValue = nullptr;
2735       if (Cost->isUniformAfterVectorization(I, VF)) {
2736         VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0));
2737       } else {
2738         VectorValue = UndefValue::get(VectorType::get(V->getType(), VF));
2739         for (unsigned Lane = 0; Lane < VF; ++Lane)
2740           VectorValue = Builder.CreateInsertElement(
2741               VectorValue, getScalarValue(V, Part, Lane),
2742               Builder.getInt32(Lane));
2743       }
2744       Entry[Part] = VectorValue;
2745     }
2746     Builder.restoreIP(OldIP);
2747     return VectorLoopValueMap.initVector(V, Entry);
2748   }
2749 
2750   // If this scalar is unknown, assume that it is a constant or that it is
2751   // loop invariant. Broadcast V and save the value for future uses.
2752   Value *B = getBroadcastInstrs(V);
2753   return VectorLoopValueMap.initVector(V, VectorParts(UF, B));
2754 }
2755 
2756 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
2757                                            unsigned Lane) {
2758 
2759   // If the value is not an instruction contained in the loop, it should
2760   // already be scalar.
2761   if (OrigLoop->isLoopInvariant(V))
2762     return V;
2763 
  assert((Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2767 
2768   // If the value from the original loop has not been vectorized, it is
2769   // represented by UF x VF scalar values in the new loop. Return the requested
2770   // scalar value.
2771   if (VectorLoopValueMap.hasScalar(V))
2772     return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane];
2773 
2774   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2775   // for the given unroll part. If this entry is not a vector type (i.e., the
2776   // vectorization factor is one), there is no need to generate an
2777   // extractelement instruction.
2778   auto *U = getVectorValue(V)[Part];
2779   if (!U->getType()->isVectorTy()) {
2780     assert(VF == 1 && "Value not scalarized has non-vector type");
2781     return U;
2782   }
2783 
2784   // Otherwise, the value from the original loop has been vectorized and is
2785   // represented by UF vector values. Extract and return the requested scalar
2786   // value from the appropriate vector lane.
2787   return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
2788 }
2789 
2790 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2791   assert(Vec->getType()->isVectorTy() && "Invalid type");
2792   SmallVector<Constant *, 8> ShuffleMask;
2793   for (unsigned i = 0; i < VF; ++i)
2794     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2795 
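  // For example (a sketch), with VF = 4 the mask is <3, 2, 1, 0>, so the
  // vector <a, b, c, d> becomes <d, c, b, a>.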
2796   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2797                                      ConstantVector::get(ShuffleMask),
2798                                      "reverse");
2799 }
2800 
2801 // Try to vectorize the interleave group that \p Instr belongs to.
2802 //
// E.g. Translate the following interleaved load group (factor = 3):
2804 //   for (i = 0; i < N; i+=3) {
2805 //     R = Pic[i];             // Member of index 0
2806 //     G = Pic[i+1];           // Member of index 1
2807 //     B = Pic[i+2];           // Member of index 2
2808 //     ... // do something to R, G, B
2809 //   }
2810 // To:
2811 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2812 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2813 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2814 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2815 //
// Or translate the following interleaved store group (factor = 3):
2817 //   for (i = 0; i < N; i+=3) {
2818 //     ... do something to R, G, B
2819 //     Pic[i]   = R;           // Member of index 0
2820 //     Pic[i+1] = G;           // Member of index 1
2821 //     Pic[i+2] = B;           // Member of index 2
2822 //   }
2823 // To:
2824 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2825 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2826 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2827 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2828 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2829 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2830   const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");
2832 
2833   // Skip if current instruction is not the insert position.
2834   if (Instr != Group->getInsertPos())
2835     return;
2836 
2837   Value *Ptr = getPointerOperand(Instr);
2838 
2839   // Prepare for the vector type of the interleaved load/store.
2840   Type *ScalarTy = getMemInstValueType(Instr);
2841   unsigned InterleaveFactor = Group->getFactor();
2842   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2843   Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2844 
2845   // Prepare for the new pointers.
2846   setDebugLocFromInst(Builder, Ptr);
2847   SmallVector<Value *, 2> NewPtrs;
2848   unsigned Index = Group->getIndex(Instr);
2849 
2850   // If the group is reverse, adjust the index to refer to the last vector lane
2851   // instead of the first. We adjust the index from the first vector lane,
2852   // rather than directly getting the pointer for lane VF - 1, because the
2853   // pointer operand of the interleaved access is supposed to be uniform. For
2854   // uniform instructions, we're only required to generate a value for the
2855   // first vector lane in each unroll iteration.
2856   if (Group->isReverse())
2857     Index += (VF - 1) * Group->getFactor();
2858 
2859   for (unsigned Part = 0; Part < UF; Part++) {
2860     Value *NewPtr = getScalarValue(Ptr, Part, 0);
2861 
    // Note that the current instruction may be at any index in the group. We
    // need to adjust the address so that it points at the member of index 0.
2864     //
2865     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2866     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2868     //
2869     // E.g.  A[i+1] = a;     // Member of index 1
2870     //       A[i]   = b;     // Member of index 0
2871     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2873     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2874 
2875     // Cast to the vector pointer type.
2876     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2877   }
2878 
2879   setDebugLocFromInst(Builder, Instr);
2880   Value *UndefVec = UndefValue::get(VecTy);
2881 
2882   // Vectorize the interleaved load group.
2883   if (isa<LoadInst>(Instr)) {
2884 
2885     // For each unroll part, create a wide load for the group.
2886     SmallVector<Value *, 2> NewLoads;
2887     for (unsigned Part = 0; Part < UF; Part++) {
2888       auto *NewLoad = Builder.CreateAlignedLoad(
2889           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2890       addMetadata(NewLoad, Instr);
2891       NewLoads.push_back(NewLoad);
2892     }
2893 
2894     // For each member in the group, shuffle out the appropriate data from the
2895     // wide loads.
2896     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2897       Instruction *Member = Group->getMember(I);
2898 
2899       // Skip the gaps in the group.
2900       if (!Member)
2901         continue;
2902 
2903       VectorParts Entry(UF);
2904       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2905       for (unsigned Part = 0; Part < UF; Part++) {
2906         Value *StridedVec = Builder.CreateShuffleVector(
2907             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2908 
        // If this member has a different type, cast the result to that type.
2910         if (Member->getType() != ScalarTy) {
2911           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2912           StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2913         }
2914 
2915         Entry[Part] =
2916             Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2917       }
2918       VectorLoopValueMap.initVector(Member, Entry);
2919     }
2920     return;
2921   }
2922 
  // The subvector type for the current instruction.
2924   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2925 
2926   // Vectorize the interleaved store group.
2927   for (unsigned Part = 0; Part < UF; Part++) {
2928     // Collect the stored vector from each member.
2929     SmallVector<Value *, 4> StoredVecs;
2930     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
2932       Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");
2934 
2935       Value *StoredVec =
2936           getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2937       if (Group->isReverse())
2938         StoredVec = reverseVector(StoredVec);
2939 
      // If this member has a different type, cast it to the unified type.
2941       if (StoredVec->getType() != SubVT)
2942         StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2943 
2944       StoredVecs.push_back(StoredVec);
2945     }
2946 
2947     // Concatenate all vectors into a wide vector.
2948     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2949 
2950     // Interleave the elements in the wide vector.
2951     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2952     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2953                                               "interleaved.vec");
2954 
2955     Instruction *NewStoreInstr =
2956         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2957     addMetadata(NewStoreInstr, Instr);
2958   }
2959 }
2960 
2961 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load or store.
2963   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2964   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2965 
2966   assert((LI || SI) && "Invalid Load/Store instruction");
2967 
2968   LoopVectorizationCostModel::InstWidening Decision =
2969       Cost->getWideningDecision(Instr, VF);
2970   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2971          "CM decision should be taken at this point");
2972   if (Decision == LoopVectorizationCostModel::CM_Interleave)
2973     return vectorizeInterleaveGroup(Instr);
2974 
2975   Type *ScalarDataTy = getMemInstValueType(Instr);
2976   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2977   Value *Ptr = getPointerOperand(Instr);
2978   unsigned Alignment = getMemInstAlignment(Instr);
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
2981   const DataLayout &DL = Instr->getModule()->getDataLayout();
2982   if (!Alignment)
2983     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2984   unsigned AddressSpace = getMemInstAddressSpace(Instr);
2985 
2986   // Scalarize the memory instruction if necessary.
2987   if (Decision == LoopVectorizationCostModel::CM_Scalarize)
2988     return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));
2989 
2990   // Determine if the pointer operand of the access is either consecutive or
2991   // reverse consecutive.
2992   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2993   bool Reverse = ConsecutiveStride < 0;
2994   bool CreateGatherScatter =
2995       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2996 
2997   VectorParts VectorGep;
2998 
2999   // Handle consecutive loads/stores.
3000   GetElementPtrInst *Gep = getGEPInstruction(Ptr);
3001   if (ConsecutiveStride) {
3002     if (Gep) {
3003       unsigned NumOperands = Gep->getNumOperands();
3004 #ifndef NDEBUG
      // The original GEP that was identified as a consecutive memory access
      // should have only one loop-variant operand.
3007       unsigned NumOfLoopVariantOps = 0;
3008       for (unsigned i = 0; i < NumOperands; ++i)
3009         if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
3010                                           OrigLoop))
3011           NumOfLoopVariantOps++;
3012       assert(NumOfLoopVariantOps == 1 &&
3013              "Consecutive GEP should have only one loop-variant operand");
3014 #endif
3015       GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
3016       Gep2->setName("gep.indvar");
3017 
      // A new GEP is created for the lane-0 value of the first unroll
      // iteration. The GEPs for the rest of the unroll iterations are
      // computed below as an offset from this GEP.
3021       for (unsigned i = 0; i < NumOperands; ++i)
        // We can apply getScalarValue() to all GEP operands. It returns the
        // original value for a loop-invariant operand and the lane-0 value
        // for a consecutive operand.
3025         Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
3026                                            0, /* First unroll iteration */
3027                                            0  /* 0-lane of the vector */ ));
3028       setDebugLocFromInst(Builder, Gep);
3029       Ptr = Builder.Insert(Gep2);
3030 
3031     } else { // No GEP
3032       setDebugLocFromInst(Builder, Ptr);
3033       Ptr = getScalarValue(Ptr, 0, 0);
3034     }
3035   } else {
    // At this point we should have a vector version of the GEP for a gather
    // or scatter.
3037     assert(CreateGatherScatter && "The instruction should be scalarized");
3038     if (Gep) {
      // Vectorize the GEP across UF parts. We want a vector value for the
      // base and for each index that is defined inside the loop, even if it
      // is loop-invariant but wasn't hoisted out. Otherwise, we want to keep
      // them scalar.
3043       SmallVector<VectorParts, 4> OpsV;
3044       for (Value *Op : Gep->operands()) {
3045         Instruction *SrcInst = dyn_cast<Instruction>(Op);
3046         if (SrcInst && OrigLoop->contains(SrcInst))
3047           OpsV.push_back(getVectorValue(Op));
3048         else
3049           OpsV.push_back(VectorParts(UF, Op));
3050       }
3051       for (unsigned Part = 0; Part < UF; ++Part) {
3052         SmallVector<Value *, 4> Ops;
3053         Value *GEPBasePtr = OpsV[0][Part];
3054         for (unsigned i = 1; i < Gep->getNumOperands(); i++)
3055           Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
3057         cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
3058         assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
3059 
3060         NewGep =
3061             Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
3062         VectorGep.push_back(NewGep);
3063       }
3064     } else
3065       VectorGep = getVectorValue(Ptr);
3066   }
3067 
3068   VectorParts Mask = createBlockInMask(Instr->getParent());
3069   // Handle Stores:
3070   if (SI) {
3071     assert(!Legal->isUniform(SI->getPointerOperand()) &&
3072            "We do not allow storing to uniform addresses");
3073     setDebugLocFromInst(Builder, SI);
3074     // We don't want to update the value in the map as it might be used in
3075     // another expression. So don't use a reference type for "StoredVal".
3076     VectorParts StoredVal = getVectorValue(SI->getValueOperand());
3077 
3078     for (unsigned Part = 0; Part < UF; ++Part) {
3079       Instruction *NewSI = nullptr;
3080       if (CreateGatherScatter) {
3081         Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
3082         NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
3083                                             Alignment, MaskPart);
3084       } else {
3085         // Calculate the pointer for the specific unroll-part.
3086         Value *PartPtr =
3087             Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3088 
3089         if (Reverse) {
3090           // If we store to reverse consecutive memory locations, then we need
3091           // to reverse the order of elements in the stored value.
3092           StoredVal[Part] = reverseVector(StoredVal[Part]);
3093           // If the address is consecutive but reversed, then the
3094           // wide store needs to start at the last vector element.
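          // For example (a sketch), with VF = 4 the pointer for part P is
          // Ptr - 4*P - 3, so the wide store covers the four locations
          // ending at Ptr - 4*P.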
3095           PartPtr =
3096               Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3097           PartPtr =
3098               Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3099           Mask[Part] = reverseVector(Mask[Part]);
3100         }
3101 
3102         Value *VecPtr =
3103             Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3104 
3105         if (Legal->isMaskRequired(SI))
3106           NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
3107                                             Mask[Part]);
3108         else
3109           NewSI =
3110               Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
3111       }
3112       addMetadata(NewSI, SI);
3113     }
3114     return;
3115   }
3116 
3117   // Handle loads.
3118   assert(LI && "Must have a load instruction");
3119   setDebugLocFromInst(Builder, LI);
3120   VectorParts Entry(UF);
3121   for (unsigned Part = 0; Part < UF; ++Part) {
3122     Instruction *NewLI;
3123     if (CreateGatherScatter) {
3124       Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
3125       NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
3126                                          0, "wide.masked.gather");
3127       Entry[Part] = NewLI;
3128     } else {
3129       // Calculate the pointer for the specific unroll-part.
3130       Value *PartPtr =
3131           Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3132 
3133       if (Reverse) {
3134         // If the address is consecutive but reversed, then the
3135         // wide load needs to start at the last vector element.
3136         PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3137         PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3138         Mask[Part] = reverseVector(Mask[Part]);
3139       }
3140 
3141       Value *VecPtr =
3142           Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3143       if (Legal->isMaskRequired(LI))
3144         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
3145                                          UndefValue::get(DataTy),
3146                                          "wide.masked.load");
3147       else
3148         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
3149       Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
3150     }
3151     addMetadata(NewLI, LI);
3152   }
3153   VectorLoopValueMap.initVector(Instr, Entry);
3154 }
3155 
3156 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3157                                                bool IfPredicateInstr) {
3158   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3159   DEBUG(dbgs() << "LV: Scalarizing"
3160                << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
3161                << '\n');
3164 
3165   setDebugLocFromInst(Builder, Instr);
3166 
  // Does this instruction return a value?
3168   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3169 
3170   // Initialize a new scalar map entry.
3171   ScalarParts Entry(UF);
3172 
3173   VectorParts Cond;
3174   if (IfPredicateInstr)
3175     Cond = createBlockInMask(Instr->getParent());
3176 
3177   // Determine the number of scalars we need to generate for each unroll
3178   // iteration. If the instruction is uniform, we only need to generate the
3179   // first lane. Otherwise, we generate all VF values.
3180   unsigned Lanes = Cost->isUniformAfterVectorization(Instr, VF) ? 1 : VF;
3181 
3182   // For each vector unroll 'part':
3183   for (unsigned Part = 0; Part < UF; ++Part) {
3184     Entry[Part].resize(VF);
3185     // For each scalar that we create:
3186     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3187 
3188       // Start if-block.
3189       Value *Cmp = nullptr;
3190       if (IfPredicateInstr) {
3191         Cmp = Cond[Part];
3192         if (Cmp->getType()->isVectorTy())
3193           Cmp = Builder.CreateExtractElement(Cmp, Builder.getInt32(Lane));
3194         Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
3195                                  ConstantInt::get(Cmp->getType(), 1));
3196       }
3197 
3198       Instruction *Cloned = Instr->clone();
3199       if (!IsVoidRetTy)
3200         Cloned->setName(Instr->getName() + ".cloned");
3201 
3202       // Replace the operands of the cloned instructions with their scalar
3203       // equivalents in the new loop.
3204       for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3205         auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
3206         Cloned->setOperand(op, NewOp);
3207       }
3208       addNewMetadata(Cloned, Instr);
3209 
3210       // Place the cloned scalar in the new loop.
3211       Builder.Insert(Cloned);
3212 
3213       // Add the cloned scalar to the scalar map entry.
3214       Entry[Part][Lane] = Cloned;
3215 
      // If we just cloned a new assumption, add it to the assumption cache.
3217       if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3218         if (II->getIntrinsicID() == Intrinsic::assume)
3219           AC->registerAssumption(II);
3220 
3221       // End if-block.
3222       if (IfPredicateInstr)
3223         PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
3224     }
3225   }
3226   VectorLoopValueMap.initScalar(Instr, Entry);
3227 }
3228 
3229 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3230                                                       Value *End, Value *Step,
3231                                                       Instruction *DL) {
3232   BasicBlock *Header = L->getHeader();
3233   BasicBlock *Latch = L->getLoopLatch();
3234   // As we're just creating this loop, it's possible no latch exists
3235   // yet. If so, use the header as this will be a single block loop.
3236   if (!Latch)
3237     Latch = Header;
3238 
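  // The generated loop has the following shape (a sketch; value names may
  // differ):
  //   header:
  //     %index = phi [ %Start, %preheader ], [ %index.next, %latch ]
  //     ...
  //   latch:
  //     %index.next = add %index, %Step
  //     %cmp = icmp eq %index.next, %End
  //     br %cmp, %exit, %header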
3239   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3240   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3241   setDebugLocFromInst(Builder, OldInst);
3242   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3243 
3244   Builder.SetInsertPoint(Latch->getTerminator());
3245   setDebugLocFromInst(Builder, OldInst);
3246 
3247   // Create i+1 and fill the PHINode.
3248   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3249   Induction->addIncoming(Start, L->getLoopPreheader());
3250   Induction->addIncoming(Next, Latch);
3251   // Create the compare.
3252   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3253   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3254 
3255   // Now we have two terminators. Remove the old one from the block.
3256   Latch->getTerminator()->eraseFromParent();
3257 
3258   return Induction;
3259 }
3260 
3261 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3262   if (TripCount)
3263     return TripCount;
3264 
3265   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3266   // Find the loop boundaries.
3267   ScalarEvolution *SE = PSE.getSE();
3268   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3269   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3270          "Invalid loop count");
3271 
3272   Type *IdxTy = Legal->getWidestInductionType();
3273 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the
  // compare. The only way we get a backedge-taken count in that case is if
  // the induction variable was signed, and as such it will not overflow;
  // truncation is therefore legal.
3279   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3280       IdxTy->getPrimitiveSizeInBits())
3281     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3282   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3283 
3284   // Get the total trip count from the count by adding 1.
3285   const SCEV *ExitCount = SE->getAddExpr(
3286       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
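  // For example (a sketch), for the loop 'for (i = 0; i < n; ++i)' the
  // backedge-taken count is n - 1 and the trip count is n.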
3287 
3288   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3289 
3290   // Expand the trip count and place the new instructions in the preheader.
3291   // Notice that the pre-header does not change, only the loop body.
3292   SCEVExpander Exp(*SE, DL, "induction");
3293 
3294   // Count holds the overall loop count (N).
3295   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3296                                 L->getLoopPreheader()->getTerminator());
3297 
3298   if (TripCount->getType()->isPointerTy())
3299     TripCount =
3300         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3301                                     L->getLoopPreheader()->getTerminator());
3302 
3303   return TripCount;
3304 }
3305 
3306 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3307   if (VectorTripCount)
3308     return VectorTripCount;
3309 
3310   Value *TC = getOrCreateTripCount(L);
3311   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3312 
3313   // Now we need to generate the expression for the part of the loop that the
3314   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3315   // iterations are not required for correctness, or N - Step, otherwise. Step
3316   // is equal to the vectorization factor (number of SIMD elements) times the
3317   // unroll factor (number of SIMD instructions).
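  //
  // For example (a sketch), with N = 21, VF = 4, and UF = 2, Step is 8,
  // N % Step is 5, and the vector loop executes 16 of the 21 iterations.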
3318   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3319   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3320 
3321   // If there is a non-reversed interleaved group that may speculatively access
3322   // memory out-of-bounds, we need to ensure that there will be at least one
3323   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3324   // the trip count, we set the remainder to be equal to the step. If the step
3325   // does not evenly divide the trip count, no adjustment is necessary since
3326   // there will already be scalar iterations. Note that the minimum iterations
3327   // check ensures that N >= Step.
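  //
  // For example (a sketch), if N = 16 and Step = 8, R would be 0; we set
  // R = 8 so that n.vec = 8 and the scalar epilogue runs 8 iterations.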
3328   if (VF > 1 && Legal->requiresScalarEpilogue()) {
3329     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3330     R = Builder.CreateSelect(IsZero, Step, R);
3331   }
3332 
3333   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3334 
3335   return VectorTripCount;
3336 }
3337 
3338 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3339                                                          BasicBlock *Bypass) {
3340   Value *Count = getOrCreateTripCount(L);
3341   BasicBlock *BB = L->getLoopPreheader();
3342   IRBuilder<> Builder(BB->getTerminator());
3343 
  // Generate code to check that the trip count we computed by adding one to
  // the backedge-taken count is at least the minimum number of iterations,
  // VF * UF.
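  //
  // For example (a sketch), with VF = 4 and UF = 2 we bypass the vector loop
  // whenever Count < 8. This also covers overflow: if the backedge-taken
  // count was uint_max, adding one wrapped Count around to 0, and the same
  // comparison sends us to the scalar loop.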
3346   Value *CheckMinIters = Builder.CreateICmpULT(
3347       Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3348 
3349   BasicBlock *NewBB =
3350       BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
3351   // Update dominator tree immediately if the generated block is a
3352   // LoopBypassBlock because SCEV expansions to generate loop bypass
3353   // checks may query it before the current function is finished.
3354   DT->addNewBlock(NewBB, BB);
3355   if (L->getParentLoop())
3356     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3357   ReplaceInstWithInst(BB->getTerminator(),
3358                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
3359   LoopBypassBlocks.push_back(BB);
3360 }
3361 
3362 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
3363                                                      BasicBlock *Bypass) {
3364   Value *TC = getOrCreateVectorTripCount(L);
3365   BasicBlock *BB = L->getLoopPreheader();
3366   IRBuilder<> Builder(BB->getTerminator());
3367 
3368   // Now, compare the new count to zero. If it is zero skip the vector loop and
3369   // jump to the scalar loop.
3370   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3371                                     "cmp.zero");
3372 
3375   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3376   // Update dominator tree immediately if the generated block is a
3377   // LoopBypassBlock because SCEV expansions to generate loop bypass
3378   // checks may query it before the current function is finished.
3379   DT->addNewBlock(NewBB, BB);
3380   if (L->getParentLoop())
3381     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3382   ReplaceInstWithInst(BB->getTerminator(),
3383                       BranchInst::Create(Bypass, NewBB, Cmp));
3384   LoopBypassBlocks.push_back(BB);
3385 }
3386 
3387 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3388   BasicBlock *BB = L->getLoopPreheader();
3389 
  // Generate the code to check the SCEV assumptions that we made.
3391   // We want the new basic block to start at the first instruction in a
3392   // sequence of instructions that form a check.
3393   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3394                    "scev.check");
3395   Value *SCEVCheck =
3396       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3397 
3398   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3399     if (C->isZero())
3400       return;
3401 
  // Create a new block containing the SCEV check.
3403   BB->setName("vector.scevcheck");
3404   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3405   // Update dominator tree immediately if the generated block is a
3406   // LoopBypassBlock because SCEV expansions to generate loop bypass
3407   // checks may query it before the current function is finished.
3408   DT->addNewBlock(NewBB, BB);
3409   if (L->getParentLoop())
3410     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3411   ReplaceInstWithInst(BB->getTerminator(),
3412                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3413   LoopBypassBlocks.push_back(BB);
3414   AddedSafetyChecks = true;
3415 }
3416 
3417 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3418   BasicBlock *BB = L->getLoopPreheader();
3419 
3420   // Generate the code that checks in runtime if arrays overlap. We put the
3421   // checks into a separate block to make the more common case of few elements
3422   // faster.
3423   Instruction *FirstCheckInst;
3424   Instruction *MemRuntimeCheck;
3425   std::tie(FirstCheckInst, MemRuntimeCheck) =
3426       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3427   if (!MemRuntimeCheck)
3428     return;
3429 
3430   // Create a new block containing the memory check.
3431   BB->setName("vector.memcheck");
3432   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3433   // Update dominator tree immediately if the generated block is a
3434   // LoopBypassBlock because SCEV expansions to generate loop bypass
3435   // checks may query it before the current function is finished.
3436   DT->addNewBlock(NewBB, BB);
3437   if (L->getParentLoop())
3438     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3439   ReplaceInstWithInst(BB->getTerminator(),
3440                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3441   LoopBypassBlocks.push_back(BB);
3442   AddedSafetyChecks = true;
3443 
3444   // We currently don't use LoopVersioning for the actual loop cloning but we
3445   // still use it to add the noalias metadata.
3446   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3447                                            PSE.getSE());
3448   LVer->prepareNoAliasMetadata();
3449 }
3450 
3451 void InnerLoopVectorizer::createEmptyLoop() {
3452   /*
3453    In this function we generate a new loop. The new loop will contain
3454    the vectorized instructions while the old loop will continue to run the
3455    scalar remainder.
3456 
3457        [ ] <-- loop iteration number check.
3458     /   |
3459    /    v
3460   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3461   |  /  |
3462   | /   v
3463   ||   [ ]     <-- vector pre header.
3464   |/    |
3465   |     v
3466   |    [  ] \
3467   |    [  ]_|   <-- vector loop.
3468   |     |
3469   |     v
3470   |   -[ ]   <--- middle-block.
3471   |  /  |
3472   | /   v
3473   -|- >[ ]     <--- new preheader.
3474    |    |
3475    |    v
3476    |   [ ] \
3477    |   [ ]_|   <-- old scalar loop to handle remainder.
3478     \   |
3479      \  v
3480       >[ ]     <-- exit block.
3481    ...
3482    */
3483 
3484   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3485   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3486   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3487   assert(VectorPH && "Invalid loop structure");
3488   assert(ExitBlock && "Must have an exit block");
3489 
3490   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
3492   // induction variables. In the code below we also support a case where we
3493   // don't have a single induction variable.
3494   //
3495   // We try to obtain an induction variable from the original loop as hard
3496   // as possible. However if we don't find one that:
3497   //   - is an integer
3498   //   - counts from zero, stepping by one
3499   //   - is the size of the widest induction variable type
3500   // then we create a new one.
3501   OldInduction = Legal->getPrimaryInduction();
3502   Type *IdxTy = Legal->getWidestInductionType();
3503 
3504   // Split the single block loop into the two loop structure described above.
3505   BasicBlock *VecBody =
3506       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3507   BasicBlock *MiddleBlock =
3508       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3509   BasicBlock *ScalarPH =
3510       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3511 
3512   // Create and register the new vector loop.
3513   Loop *Lp = new Loop();
3514   Loop *ParentLoop = OrigLoop->getParentLoop();
3515 
3516   // Insert the new loop into the loop nest and register the new basic blocks
3517   // before calling any utilities such as SCEV that require valid LoopInfo.
3518   if (ParentLoop) {
3519     ParentLoop->addChildLoop(Lp);
3520     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3521     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3522   } else {
3523     LI->addTopLevelLoop(Lp);
3524   }
3525   Lp->addBasicBlockToLoop(VecBody, *LI);
3526 
3527   // Find the loop boundaries.
3528   Value *Count = getOrCreateTripCount(Lp);
3529 
3530   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3531 
3532   // We need to test whether the backedge-taken count is uint##_max. Adding one
3533   // to it will cause overflow and an incorrect loop trip count in the vector
3534   // body. In case of overflow we want to directly jump to the scalar remainder
3535   // loop.
3536   emitMinimumIterationCountCheck(Lp, ScalarPH);
3537   // Now, compare the new count to zero. If it is zero skip the vector loop and
3538   // jump to the scalar loop.
3539   emitVectorLoopEnteredCheck(Lp, ScalarPH);
3540   // Generate the code to check any assumptions that we've made for SCEV
3541   // expressions.
3542   emitSCEVChecks(Lp, ScalarPH);
3543 
3544   // Generate the code that checks in runtime if arrays overlap. We put the
3545   // checks into a separate block to make the more common case of few elements
3546   // faster.
3547   emitMemRuntimeChecks(Lp, ScalarPH);
3548 
3549   // Generate the induction variable.
3550   // The loop step is equal to the vectorization factor (num of SIMD elements)
3551   // times the unroll factor (num of SIMD instructions).
3552   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3553   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3554   Induction =
3555       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3556                               getDebugLocFromInstOrOperands(OldInduction));
3557 
3558   // We are going to resume the execution of the scalar loop.
3559   // Go over all of the induction variables that we found and fix the
3560   // PHIs that are left in the scalar version of the loop.
3561   // The starting values of PHI nodes depend on the counter of the last
3562   // iteration in the vectorized loop.
3563   // If we come from a bypass edge then we need to start from the original
3564   // start value.
3565 
  // The resume values created below ("bc.resume.val") hold the new starting
  // indices for the scalar loop. They are used to test whether there are any
  // tail iterations left once the vector loop has completed.
3569   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3570   for (auto &InductionEntry : *List) {
3571     PHINode *OrigPhi = InductionEntry.first;
3572     InductionDescriptor II = InductionEntry.second;
3573 
    // Create phi nodes to merge from the backedge-taken check block.
3575     PHINode *BCResumeVal = PHINode::Create(
3576         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3577     Value *&EndValue = IVEndValues[OrigPhi];
3578     if (OrigPhi == OldInduction) {
3579       // We know what the end value is.
3580       EndValue = CountRoundDown;
3581     } else {
3582       IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3583       Type *StepType = II.getStep()->getType();
3584       Instruction::CastOps CastOp =
3585         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3586       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3587       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3588       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3589       EndValue->setName("ind.end");
3590     }
3591 
3592     // The new PHI merges the original incoming value, in case of a bypass,
3593     // or the value at the end of the vectorized loop.
3594     BCResumeVal->addIncoming(EndValue, MiddleBlock);
3595 
3596     // Fix the scalar body counter (PHI node).
3597     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3598 
    // If we arrive here directly from a bypass block, the phi resumes from
    // the original start value.
3601     for (BasicBlock *BB : LoopBypassBlocks)
3602       BCResumeVal->addIncoming(II.getStartValue(), BB);
3603     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3604   }
3605 
3606   // Add a check in the middle block to see if we have completed
3607   // all of the iterations in the first vector loop.
  // If (N - N % (VF * UF)) == N, then we *don't* need to run the remainder.
3609   Value *CmpN =
3610       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3611                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3612   ReplaceInstWithInst(MiddleBlock->getTerminator(),
3613                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3614 
3615   // Get ready to start creating new instructions into the vectorized body.
3616   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3617 
3618   // Save the state.
3619   LoopVectorPreHeader = Lp->getLoopPreheader();
3620   LoopScalarPreHeader = ScalarPH;
3621   LoopMiddleBlock = MiddleBlock;
3622   LoopExitBlock = ExitBlock;
3623   LoopVectorBody = VecBody;
3624   LoopScalarBody = OldBasicBlock;
3625 
3626   // Keep all loop hints from the original loop on the vector loop (we'll
3627   // replace the vectorizer-specific hints below).
3628   if (MDNode *LID = OrigLoop->getLoopID())
3629     Lp->setLoopID(LID);
3630 
3631   LoopVectorizeHints Hints(Lp, true, *ORE);
3632   Hints.setAlreadyVectorized();
3633 }
3634 
3635 // Fix up external users of the induction variable. At this point, we are
3636 // in LCSSA form, with all external PHIs that use the IV having one input value,
3637 // coming from the remainder loop. We need those PHIs to also have a correct
3638 // value for the IV when arriving directly from the middle block.
3639 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3640                                        const InductionDescriptor &II,
3641                                        Value *CountRoundDown, Value *EndValue,
3642                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they obviously have different values.
3647 
3648   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3649 
3650   DenseMap<Value *, Value *> MissingVals;
3651 
3652   // An external user of the last iteration's value should see the value that
3653   // the remainder loop uses to initialize its own IV.
3654   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3655   for (User *U : PostInc->users()) {
3656     Instruction *UI = cast<Instruction>(U);
3657     if (!OrigLoop->contains(UI)) {
3658       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3659       MissingVals[UI] = EndValue;
3660     }
3661   }
3662 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is, Start + (Step * (CRD - 1)).
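  //
  // For example (a sketch), for an IV with start 0 and step 1, the escape
  // value is simply CRD - 1.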
3666   for (User *U : OrigPhi->users()) {
3667     auto *UI = cast<Instruction>(U);
3668     if (!OrigLoop->contains(UI)) {
3669       const DataLayout &DL =
3670           OrigLoop->getHeader()->getModule()->getDataLayout();
3671       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3672 
3673       IRBuilder<> B(MiddleBlock->getTerminator());
3674       Value *CountMinusOne = B.CreateSub(
3675           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3676       Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3677                                        "cast.cmo");
3678       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3679       Escape->setName("ind.escape");
3680       MissingVals[UI] = Escape;
3681     }
3682   }
3683 
3684   for (auto &I : MissingVals) {
3685     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is, %IV2 = phi [...], [ %IV1, %latch ].
3688     // In this case, if IV1 has an external use, we need to avoid adding both
3689     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3690     // don't already have an incoming value for the middle block.
3691     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3692       PHI->addIncoming(I.second, MiddleBlock);
3693   }
3694 }
3695 
3696 namespace {
3697 struct CSEDenseMapInfo {
3698   static bool canHandle(const Instruction *I) {
3699     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3700            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3701   }
3702   static inline Instruction *getEmptyKey() {
3703     return DenseMapInfo<Instruction *>::getEmptyKey();
3704   }
3705   static inline Instruction *getTombstoneKey() {
3706     return DenseMapInfo<Instruction *>::getTombstoneKey();
3707   }
3708   static unsigned getHashValue(const Instruction *I) {
3709     assert(canHandle(I) && "Unknown instruction!");
3710     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3711                                                            I->value_op_end()));
3712   }
3713   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3714     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3715         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3716       return LHS == RHS;
3717     return LHS->isIdenticalTo(RHS);
3718   }
3719 };
3720 }
3721 
/// \brief Perform CSE of induction variable instructions.
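/// For example, two identical 'extractelement <4 x i32> %v, i32 0'
/// instructions in the block fold into one (an illustrative case; any of
/// the insert/extract/shuffle/GEP instructions handled by CSEDenseMapInfo
/// qualifies).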
3723 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3725   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3726   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3727     Instruction *In = &*I++;
3728 
3729     if (!CSEDenseMapInfo::canHandle(In))
3730       continue;
3731 
3732     // Check if we can replace this instruction with any of the
3733     // visited instructions.
3734     if (Instruction *V = CSEMap.lookup(In)) {
3735       In->replaceAllUsesWith(V);
3736       In->eraseFromParent();
3737       continue;
3738     }
3739 
3740     CSEMap[In] = In;
3741   }
3742 }
3743 
3744 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3745 /// convenience wrapper for the type-based getScalarizationOverhead API.
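/// For example (an illustrative breakdown; the actual numbers come from
/// TTI), scalarizing an instruction with one vector operand and a vector
/// result at VF = 4 is charged four inserts to assemble the result and
/// four extracts to obtain the scalar operands.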
3746 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3747                                          const TargetTransformInfo &TTI) {
3748   if (VF == 1)
3749     return 0;
3750 
3751   unsigned Cost = 0;
3752   Type *RetTy = ToVectorTy(I->getType(), VF);
3753   if (!RetTy->isVoidTy())
3754     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3755 
3756   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3757     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3758     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3759   } else {
3760     SmallVector<const Value *, 4> Operands(I->operand_values());
3761     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3762   }
3763 
3764   return Cost;
3765 }
3766 
3767 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3768 // Return the cost of the instruction, including scalarization overhead if it's
// needed. The flag NeedToScalarize shows if the call needs to be scalarized,
// i.e., if either a vector version isn't available or the vector version is
// too expensive.
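// For example (illustrative numbers only): at VF = 4, a scalar call cost of
// 10 and a scalarization overhead of 8 give a scalarized cost of
// 4 * 10 + 8 = 48; a TLI-provided vector variant is preferred only if its
// cost is below that.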
3771 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3772                                   const TargetTransformInfo &TTI,
3773                                   const TargetLibraryInfo *TLI,
3774                                   bool &NeedToScalarize) {
3775   Function *F = CI->getCalledFunction();
  StringRef FnName = F->getName();
3777   Type *ScalarRetTy = CI->getType();
3778   SmallVector<Type *, 4> Tys, ScalarTys;
3779   for (auto &ArgOp : CI->arg_operands())
3780     ScalarTys.push_back(ArgOp->getType());
3781 
3782   // Estimate cost of scalarized vector call. The source operands are assumed
3783   // to be vectors, so we need to extract individual elements from there,
3784   // execute VF scalar calls, and then gather the result into the vector return
3785   // value.
3786   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3787   if (VF == 1)
3788     return ScalarCallCost;
3789 
3790   // Compute corresponding vector type for return value and arguments.
3791   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3792   for (Type *ScalarTy : ScalarTys)
3793     Tys.push_back(ToVectorTy(ScalarTy, VF));
3794 
3795   // Compute costs of unpacking argument values for the scalar calls and
3796   // packing the return values to a vector.
3797   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3798 
3799   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3800 
3801   // If we can't emit a vector call for this function, then the currently found
3802   // cost is the cost we need to return.
3803   NeedToScalarize = true;
3804   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3805     return Cost;
3806 
3807   // If the corresponding vector cost is cheaper, return its cost.
3808   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3809   if (VectorCallCost < Cost) {
3810     NeedToScalarize = false;
3811     return VectorCallCost;
3812   }
3813   return Cost;
3814 }
3815 
3816 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3817 // factor VF.  Return the cost of the instruction, including scalarization
3818 // overhead if it's needed.
3819 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3820                                        const TargetTransformInfo &TTI,
3821                                        const TargetLibraryInfo *TLI) {
3822   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3823   assert(ID && "Expected intrinsic call!");
3824 
3825   FastMathFlags FMF;
3826   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3827     FMF = FPMO->getFastMathFlags();
3828 
3829   SmallVector<Value *, 4> Operands(CI->arg_operands());
3830   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3831 }
3832 
3833 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3834   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3835   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3836   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3837 }
3838 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3839   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3840   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3841   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3842 }
3843 
3844 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3845   // For every instruction `I` in MinBWs, truncate the operands, create a
3846   // truncated version of `I` and reextend its result. InstCombine runs
3847   // later and will remove any ext/trunc pairs.
3848   //
3849   SmallPtrSet<Value *, 4> Erased;
3850   for (const auto &KV : Cost->getMinimalBitwidths()) {
3851     // If the value wasn't vectorized, we must maintain the original scalar
3852     // type. The absence of the value from VectorLoopValueMap indicates that it
3853     // wasn't vectorized.
3854     if (!VectorLoopValueMap.hasVector(KV.first))
3855       continue;
3856     VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3857     for (Value *&I : Parts) {
3858       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3859         continue;
3860       Type *OriginalTy = I->getType();
3861       Type *ScalarTruncatedTy =
3862           IntegerType::get(OriginalTy->getContext(), KV.second);
3863       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3864                                           OriginalTy->getVectorNumElements());
3865       if (TruncatedTy == OriginalTy)
3866         continue;
3867 
3868       IRBuilder<> B(cast<Instruction>(I));
3869       auto ShrinkOperand = [&](Value *V) -> Value * {
3870         if (auto *ZI = dyn_cast<ZExtInst>(V))
3871           if (ZI->getSrcTy() == TruncatedTy)
3872             return ZI->getOperand(0);
3873         return B.CreateZExtOrTrunc(V, TruncatedTy);
3874       };
3875 
3876       // The actual instruction modification depends on the instruction type,
3877       // unfortunately.
3878       Value *NewI = nullptr;
3879       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3880         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3881                              ShrinkOperand(BO->getOperand(1)));
3882         cast<BinaryOperator>(NewI)->copyIRFlags(I);
3883       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3884         NewI =
3885             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3886                          ShrinkOperand(CI->getOperand(1)));
3887       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3888         NewI = B.CreateSelect(SI->getCondition(),
3889                               ShrinkOperand(SI->getTrueValue()),
3890                               ShrinkOperand(SI->getFalseValue()));
3891       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3892         switch (CI->getOpcode()) {
3893         default:
3894           llvm_unreachable("Unhandled cast!");
3895         case Instruction::Trunc:
3896           NewI = ShrinkOperand(CI->getOperand(0));
3897           break;
3898         case Instruction::SExt:
3899           NewI = B.CreateSExtOrTrunc(
3900               CI->getOperand(0),
3901               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3902           break;
3903         case Instruction::ZExt:
3904           NewI = B.CreateZExtOrTrunc(
3905               CI->getOperand(0),
3906               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3907           break;
3908         }
3909       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3910         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3911         auto *O0 = B.CreateZExtOrTrunc(
3912             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3913         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3914         auto *O1 = B.CreateZExtOrTrunc(
3915             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3916 
3917         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3918       } else if (isa<LoadInst>(I)) {
3919         // Don't do anything with the operands, just extend the result.
3920         continue;
3921       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3922         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3923         auto *O0 = B.CreateZExtOrTrunc(
3924             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3925         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3926         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3927       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3928         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3929         auto *O0 = B.CreateZExtOrTrunc(
3930             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3931         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3932       } else {
3933         llvm_unreachable("Unhandled instruction type!");
3934       }
3935 
3936       // Lastly, extend the result.
3937       NewI->takeName(cast<Instruction>(I));
3938       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3939       I->replaceAllUsesWith(Res);
3940       cast<Instruction>(I)->eraseFromParent();
3941       Erased.insert(I);
3942       I = Res;
3943     }
3944   }
3945 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3947   for (const auto &KV : Cost->getMinimalBitwidths()) {
3948     // If the value wasn't vectorized, we must maintain the original scalar
3949     // type. The absence of the value from VectorLoopValueMap indicates that it
3950     // wasn't vectorized.
3951     if (!VectorLoopValueMap.hasVector(KV.first))
3952       continue;
3953     VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3954     for (Value *&I : Parts) {
3955       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3956       if (Inst && Inst->use_empty()) {
3957         Value *NewI = Inst->getOperand(0);
3958         Inst->eraseFromParent();
3959         I = NewI;
3960       }
3961     }
3962   }
3963 }
3964 
3965 void InnerLoopVectorizer::vectorizeLoop() {
3966   //===------------------------------------------------===//
3967   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
3970   // the cost-model.
3971   //
3972   //===------------------------------------------------===//
3973 
3974   // Collect instructions from the original loop that will become trivially
3975   // dead in the vectorized loop. We don't need to vectorize these
3976   // instructions.
3977   collectTriviallyDeadInstructions();
3978 
3979   // Scan the loop in a topological order to ensure that defs are vectorized
3980   // before users.
3981   LoopBlocksDFS DFS(OrigLoop);
3982   DFS.perform(LI);
3983 
3984   // Vectorize all of the blocks in the original loop.
3985   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3986     vectorizeBlockInLoop(BB);
3987 
3988   // Insert truncates and extends for any truncated instructions as hints to
3989   // InstCombine.
3990   if (VF > 1)
3991     truncateToMinimalBitwidths();
3992 
3993   // At this point every instruction in the original loop is widened to a
3994   // vector form. Now we need to fix the recurrences in the loop. These PHI
3995   // nodes are currently empty because we did not want to introduce cycles.
3996   // This is the second stage of vectorizing recurrences.
3997   fixCrossIterationPHIs();
3998 
3999   // Update the dominator tree.
4000   //
4001   // FIXME: After creating the structure of the new loop, the dominator tree is
4002   //        no longer up-to-date, and it remains that way until we update it
4003   //        here. An out-of-date dominator tree is problematic for SCEV,
4004   //        because SCEVExpander uses it to guide code generation. The
  //        vectorizer uses SCEVExpander in several places. Instead, we should
4006   //        keep the dominator tree up-to-date as we go.
4007   updateAnalysis();
4008 
4009   // Fix-up external users of the induction variables.
4010   for (auto &Entry : *Legal->getInductionVars())
4011     fixupIVUsers(Entry.first, Entry.second,
4012                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4013                  IVEndValues[Entry.first], LoopMiddleBlock);
4014 
4015   fixLCSSAPHIs();
4016   predicateInstructions();
4017 
4018   // Remove redundant induction instructions.
4019   cse(LoopVectorBody);
4020 }
4021 
4022 void InnerLoopVectorizer::fixCrossIterationPHIs() {
4023   // In order to support recurrences we need to be able to vectorize Phi nodes.
4024   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4025   // stage #2: We now need to fix the recurrences by adding incoming edges to
4026   // the currently empty PHI nodes. At this point every instruction in the
4027   // original loop is widened to a vector form so we can use them to construct
4028   // the incoming edges.
4029   for (Instruction &I : *OrigLoop->getHeader()) {
4030     PHINode *Phi = dyn_cast<PHINode>(&I);
4031     if (!Phi)
4032       break;
4033     // Handle first-order recurrences and reductions that need to be fixed.
4034     if (Legal->isFirstOrderRecurrence(Phi))
4035       fixFirstOrderRecurrence(Phi);
4036     else if (Legal->isReductionVariable(Phi))
4037       fixReduction(Phi);
4038   }
4039 }
4040 
4041 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4042 
4043   // This is the second phase of vectorizing first-order recurrences. An
4044   // overview of the transformation is described below. Suppose we have the
4045   // following loop.
4046   //
4047   //   for (int i = 0; i < n; ++i)
4048   //     b[i] = a[i] - a[i - 1];
4049   //
4050   // There is a first-order recurrence on "a". For this loop, the shorthand
4051   // scalar IR looks like:
4052   //
4053   //   scalar.ph:
4054   //     s_init = a[-1]
4055   //     br scalar.body
4056   //
4057   //   scalar.body:
4058   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4059   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4060   //     s2 = a[i]
4061   //     b[i] = s2 - s1
4062   //     br cond, scalar.body, ...
4063   //
  // In this example, s1 is a recurrence because its value depends on the
4065   // previous iteration. In the first phase of vectorization, we created a
4066   // temporary value for s1. We now complete the vectorization and produce the
4067   // shorthand vector IR shown below (for VF = 4, UF = 1).
4068   //
4069   //   vector.ph:
4070   //     v_init = vector(..., ..., ..., a[-1])
4071   //     br vector.body
4072   //
4073   //   vector.body
4074   //     i = phi [0, vector.ph], [i+4, vector.body]
4075   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4076   //     v2 = a[i, i+1, i+2, i+3];
4077   //     v3 = vector(v1(3), v2(0, 1, 2))
4078   //     b[i, i+1, i+2, i+3] = v2 - v3
4079   //     br cond, vector.body, middle.block
4080   //
4081   //   middle.block:
4082   //     x = v2(3)
4083   //     br scalar.ph
4084   //
4085   //   scalar.ph:
4086   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4087   //     br scalar.body
4088   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
4091 
4092   // Get the original loop preheader and single loop latch.
4093   auto *Preheader = OrigLoop->getLoopPreheader();
4094   auto *Latch = OrigLoop->getLoopLatch();
4095 
4096   // Get the initial and previous values of the scalar recurrence.
4097   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4098   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4099 
4100   // Create a vector from the initial value.
4101   auto *VectorInit = ScalarInit;
4102   if (VF > 1) {
4103     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4104     VectorInit = Builder.CreateInsertElement(
4105         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4106         Builder.getInt32(VF - 1), "vector.recur.init");
4107   }
4108 
4109   // We constructed a temporary phi node in the first phase of vectorization.
4110   // This phi node will eventually be deleted.
4111   VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
4112   Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
4113 
4114   // Create a phi node for the new recurrence. The current value will either be
4115   // the initial value inserted into a vector or loop-varying vector value.
4116   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4117   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4118 
4119   // Get the vectorized previous value.
4120   auto &PreviousParts = getVectorValue(Previous);
4121 
4122   // Set the insertion point after the previous value if it is an instruction.
4123   // Note that the previous value may have been constant-folded so it is not
4124   // guaranteed to be an instruction in the vector loop.
4125   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousParts[UF - 1]))
4126     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4127   else
4128     Builder.SetInsertPoint(
4129         &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
4130 
4131   // We will construct a vector for the recurrence by combining the values for
4132   // the current and previous iterations. This is the required shuffle mask.
4133   SmallVector<Constant *, 8> ShuffleMask(VF);
4134   ShuffleMask[0] = Builder.getInt32(VF - 1);
4135   for (unsigned I = 1; I < VF; ++I)
4136     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
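  // For example, for VF = 4 the mask is <3, 4, 5, 6>: the last element of
  // the first (incoming) vector followed by the first three elements of the
  // second (previous) vector.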
4137 
4138   // The vector from which to take the initial value for the current iteration
4139   // (actual or unrolled). Initially, this is the vector phi node.
4140   Value *Incoming = VecPhi;
4141 
4142   // Shuffle the current and previous vector and update the vector parts.
4143   for (unsigned Part = 0; Part < UF; ++Part) {
4144     auto *Shuffle =
4145         VF > 1
4146             ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
4147                                           ConstantVector::get(ShuffleMask))
4148             : Incoming;
4149     PhiParts[Part]->replaceAllUsesWith(Shuffle);
4150     cast<Instruction>(PhiParts[Part])->eraseFromParent();
4151     PhiParts[Part] = Shuffle;
4152     Incoming = PreviousParts[Part];
4153   }
4154 
4155   // Fix the latch value of the new recurrence in the vector loop.
4156   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4157 
4158   // Extract the last vector element in the middle block. This will be the
4159   // initial value for the recurrence when jumping to the scalar loop.
4160   auto *Extract = Incoming;
4161   if (VF > 1) {
4162     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4163     Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
4164                                            "vector.recur.extract");
4165   }
4166 
4167   // Fix the initial value of the original recurrence in the scalar loop.
4168   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4169   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4170   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4171     auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
4172     Start->addIncoming(Incoming, BB);
4173   }
4174 
4175   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4176   Phi->setName("scalar.recur");
4177 
4178   // Finally, fix users of the recurrence outside the loop. The users will need
4179   // either the last value of the scalar recurrence or the last value of the
4180   // vector recurrence we extracted in the middle block. Since the loop is in
4181   // LCSSA form, we just need to find the phi node for the original scalar
4182   // recurrence in the exit block, and then add an edge for the middle block.
4183   for (auto &I : *LoopExitBlock) {
4184     auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4185     if (!LCSSAPhi)
4186       break;
4187     if (LCSSAPhi->getIncomingValue(0) == Phi) {
4188       LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
4189       break;
4190     }
4191   }
4192 }
4193 
4194 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4195   Constant *Zero = Builder.getInt32(0);
4196 
  // Get its reduction variable descriptor.
4198   assert(Legal->isReductionVariable(Phi) &&
4199          "Unable to find the reduction variable");
4200   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4201 
4202   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4203   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4204   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4205   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4206     RdxDesc.getMinMaxRecurrenceKind();
4207   setDebugLocFromInst(Builder, ReductionStartValue);
4208 
4209   // We need to generate a reduction vector from the incoming scalar.
4210   // To do so, we need to generate the 'identity' vector and override
4211   // one of the elements with the incoming scalar reduction. We need
4212   // to do it in the vector-loop preheader.
4213   Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
4214 
4215   // This is the vector-clone of the value that leaves the loop.
4216   const VectorParts &VectorExit = getVectorValue(LoopExitInst);
4217   Type *VecTy = VectorExit[0]->getType();
4218 
  // Find the reduction identity value: zero for addition, or, and xor;
  // one for multiplication; -1 for and.
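  // For example, for an integer add reduction at VF = 4 with incoming start
  // value %s, the identity is <0, 0, 0, 0> and the vector start value is
  // <%s, 0, 0, 0> (illustrative shorthand).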
4221   Value *Identity;
4222   Value *VectorStart;
4223   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4224       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
4226     if (VF == 1) {
4227       VectorStart = Identity = ReductionStartValue;
4228     } else {
4229       VectorStart = Identity =
4230         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4231     }
4232   } else {
4233     // Handle other reduction kinds:
4234     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4235         RK, VecTy->getScalarType());
4236     if (VF == 1) {
4237       Identity = Iden;
      // For VF == 1, the start "vector" is simply the incoming scalar
      // reduction value.
4240       VectorStart = ReductionStartValue;
4241     } else {
4242       Identity = ConstantVector::getSplat(VF, Iden);
4243 
4244       // This vector is the Identity vector where the first element is the
4245       // incoming scalar reduction.
4246       VectorStart =
4247         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4248     }
4249   }
4250 
4251   // Fix the vector-loop phi.
4252 
4253   // Reductions do not have to start at zero. They can start with
4254   // any loop invariant values.
4255   const VectorParts &VecRdxPhi = getVectorValue(Phi);
4256   BasicBlock *Latch = OrigLoop->getLoopLatch();
4257   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4258   const VectorParts &Val = getVectorValue(LoopVal);
4259   for (unsigned part = 0; part < UF; ++part) {
    // Make sure to add the reduction start value only to the
    // first unroll part.
4262     Value *StartVal = (part == 0) ? VectorStart : Identity;
4263     cast<PHINode>(VecRdxPhi[part])
4264       ->addIncoming(StartVal, LoopVectorPreHeader);
4265     cast<PHINode>(VecRdxPhi[part])
4266       ->addIncoming(Val[part], LoopVectorBody);
4267   }
4268 
4269   // Before each round, move the insertion point right between
4270   // the PHIs and the values we are going to write.
4271   // This allows us to write both PHINodes and the extractelement
4272   // instructions.
4273   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4274 
4275   VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
4276   setDebugLocFromInst(Builder, LoopExitInst);
4277 
4278   // If the vector reduction can be performed in a smaller type, we truncate
4279   // then extend the loop exit value to enable InstCombine to evaluate the
4280   // entire expression in the smaller type.
4281   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
4282     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4283     Builder.SetInsertPoint(LoopVectorBody->getTerminator());
4284     for (unsigned part = 0; part < UF; ++part) {
4285       Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
4286       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4287         : Builder.CreateZExt(Trunc, VecTy);
4288       for (Value::user_iterator UI = RdxParts[part]->user_begin();
4289            UI != RdxParts[part]->user_end();)
4290         if (*UI != Trunc) {
4291           (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
4292           RdxParts[part] = Extnd;
4293         } else {
4294           ++UI;
4295         }
4296     }
4297     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4298     for (unsigned part = 0; part < UF; ++part)
4299       RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
4300   }
4301 
4302   // Reduce all of the unrolled parts into a single vector.
4303   Value *ReducedPartRdx = RdxParts[0];
4304   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4305   setDebugLocFromInst(Builder, ReducedPartRdx);
4306   for (unsigned part = 1; part < UF; ++part) {
4307     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4308       // Floating point operations had to be 'fast' to enable the reduction.
4309       ReducedPartRdx = addFastMathFlag(
4310           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
4311                               ReducedPartRdx, "bin.rdx"));
4312     else
4313       ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
4314           Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
4315   }
4316 
4317   if (VF > 1) {
4318     // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
4319     // and vector ops, reducing the set of values being computed by half each
4320     // round.
4321     assert(isPowerOf2_32(VF) &&
4322            "Reduction emission only supported for pow2 vectors!");
4323     Value *TmpVec = ReducedPartRdx;
4324     SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
4325     for (unsigned i = VF; i != 1; i >>= 1) {
4326       // Move the upper half of the vector to the lower half.
4327       for (unsigned j = 0; j != i / 2; ++j)
4328         ShuffleMask[j] = Builder.getInt32(i / 2 + j);
4329 
4330       // Fill the rest of the mask with undef.
4331       std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
4332                 UndefValue::get(Builder.getInt32Ty()));
4333 
4334       Value *Shuf = Builder.CreateShuffleVector(
4335           TmpVec, UndefValue::get(TmpVec->getType()),
4336           ConstantVector::get(ShuffleMask), "rdx.shuf");
4337 
4338       if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4339         // Floating point operations had to be 'fast' to enable the reduction.
4340         TmpVec = addFastMathFlag(Builder.CreateBinOp(
4341                                      (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
4342       else
4343         TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
4344                                                       TmpVec, Shuf);
4345     }
4346 
4347     // The result is in the first element of the vector.
4348     ReducedPartRdx =
4349       Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4350 
4351     // If the reduction can be performed in a smaller type, we need to extend
4352     // the reduction to the wider type before we branch to the original loop.
4353     if (Phi->getType() != RdxDesc.getRecurrenceType())
4354       ReducedPartRdx =
4355         RdxDesc.isSigned()
4356         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4357         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4358   }
4359 
4360   // Create a phi node that merges control-flow from the backedge-taken check
4361   // block and the middle block.
4362   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4363                                         LoopScalarPreHeader->getTerminator());
4364   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4365     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4366   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4367 
4368   // Now, we need to fix the users of the reduction variable
4369   // inside and outside of the scalar remainder loop.
4370   // We know that the loop is in LCSSA form. We need to update the
4371   // PHI nodes in the exit blocks.
4372   for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4373          LEE = LoopExitBlock->end();
4374        LEI != LEE; ++LEI) {
4375     PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4376     if (!LCSSAPhi)
4377       break;
4378 
4379     // All PHINodes need to have a single entry edge, or two if
4380     // we already fixed them.
4381     assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4382 
4383     // We found a reduction value exit-PHI. Update it with the
4384     // incoming bypass edge.
4385     if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
4386       LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4387   } // end of the LCSSA phi scan.
4388 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
4391   int IncomingEdgeBlockIdx =
4392     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4393   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4394   // Pick the other block.
4395   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4396   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4397   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4398 }
4399 
4400 void InnerLoopVectorizer::fixLCSSAPHIs() {
4401   for (Instruction &LEI : *LoopExitBlock) {
4402     auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
4403     if (!LCSSAPhi)
4404       break;
4405     if (LCSSAPhi->getNumIncomingValues() == 1)
4406       LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
4407                             LoopMiddleBlock);
4408   }
4409 }
4410 
4411 void InnerLoopVectorizer::collectTriviallyDeadInstructions() {
4412   BasicBlock *Latch = OrigLoop->getLoopLatch();
4413 
4414   // We create new control-flow for the vectorized loop, so the original
4415   // condition will be dead after vectorization if it's only used by the
4416   // branch.
4417   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4418   if (Cmp && Cmp->hasOneUse())
4419     DeadInstructions.insert(Cmp);
4420 
4421   // We create new "steps" for induction variable updates to which the original
4422   // induction variables map. An original update instruction will be dead if
4423   // all its users except the induction variable are dead.
4424   for (auto &Induction : *Legal->getInductionVars()) {
4425     PHINode *Ind = Induction.first;
4426     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4427     if (all_of(IndUpdate->users(), [&](User *U) -> bool {
4428           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
4429         }))
4430       DeadInstructions.insert(IndUpdate);
4431   }
4432 }
4433 
4434 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4435 
4436   // The basic block and loop containing the predicated instruction.
4437   auto *PredBB = PredInst->getParent();
4438   auto *VectorLoop = LI->getLoopFor(PredBB);
4439 
4440   // Initialize a worklist with the operands of the predicated instruction.
4441   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4442 
4443   // Holds instructions that we need to analyze again. An instruction may be
4444   // reanalyzed if we don't yet know if we can sink it or not.
4445   SmallVector<Instruction *, 8> InstsToReanalyze;
4446 
4447   // Returns true if a given use occurs in the predicated block. Phi nodes use
4448   // their operands in their corresponding predecessor blocks.
4449   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4450     auto *I = cast<Instruction>(U.getUser());
4451     BasicBlock *BB = I->getParent();
4452     if (auto *Phi = dyn_cast<PHINode>(I))
4453       BB = Phi->getIncomingBlock(
4454           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4455     return BB == PredBB;
4456   };
4457 
4458   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
4460   // operands are then added to the worklist. The algorithm ends after one pass
4461   // through the worklist doesn't sink a single instruction.
4462   bool Changed;
4463   do {
4464 
4465     // Add the instructions that need to be reanalyzed to the worklist, and
4466     // reset the changed indicator.
4467     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4468     InstsToReanalyze.clear();
4469     Changed = false;
4470 
4471     while (!Worklist.empty()) {
4472       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4473 
4474       // We can't sink an instruction if it is a phi node, is already in the
4475       // predicated block, is not in the loop, or may have side effects.
4476       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4477           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4478         continue;
4479 
4480       // It's legal to sink the instruction if all its uses occur in the
4481       // predicated block. Otherwise, there's nothing to do yet, and we may
4482       // need to reanalyze the instruction.
4483       if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4484         InstsToReanalyze.push_back(I);
4485         continue;
4486       }
4487 
4488       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4490       I->moveBefore(&*PredBB->getFirstInsertionPt());
4491       Worklist.insert(I->op_begin(), I->op_end());
4492 
4493       // The sinking may have enabled other instructions to be sunk, so we will
4494       // need to iterate.
4495       Changed = true;
4496     }
4497   } while (Changed);
4498 }
4499 
4500 void InnerLoopVectorizer::predicateInstructions() {
4501 
4502   // For each instruction I marked for predication on value C, split I into its
4503   // own basic block to form an if-then construct over C. Since I may be fed by
4504   // an extractelement instruction or other scalar operand, we try to
4505   // iteratively sink its scalar operands into the predicated block. If I feeds
4506   // an insertelement instruction, we try to move this instruction into the
4507   // predicated block as well. For non-void types, a phi node will be created
4508   // for the resulting value (either vector or scalar).
4509   //
4510   // So for some predicated instruction, e.g. the conditional sdiv in:
4511   //
4512   // for.body:
4513   //  ...
4514   //  %add = add nsw i32 %mul, %0
4515   //  %cmp5 = icmp sgt i32 %2, 7
4516   //  br i1 %cmp5, label %if.then, label %if.end
4517   //
4518   // if.then:
4519   //  %div = sdiv i32 %0, %1
4520   //  br label %if.end
4521   //
4522   // if.end:
4523   //  %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
4524   //
4525   // the sdiv at this point is scalarized and if-converted using a select.
4526   // The inactive elements in the vector are not used, but the predicated
4527   // instruction is still executed for all vector elements, essentially:
4528   //
4529   // vector.body:
4530   //  ...
4531   //  %17 = add nsw <2 x i32> %16, %wide.load
4532   //  %29 = extractelement <2 x i32> %wide.load, i32 0
4533   //  %30 = extractelement <2 x i32> %wide.load51, i32 0
4534   //  %31 = sdiv i32 %29, %30
4535   //  %32 = insertelement <2 x i32> undef, i32 %31, i32 0
4536   //  %35 = extractelement <2 x i32> %wide.load, i32 1
4537   //  %36 = extractelement <2 x i32> %wide.load51, i32 1
4538   //  %37 = sdiv i32 %35, %36
4539   //  %38 = insertelement <2 x i32> %32, i32 %37, i32 1
4540   //  %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
4541   //
4542   // Predication will now re-introduce the original control flow to avoid false
4543   // side-effects by the sdiv instructions on the inactive elements, yielding
4544   // (after cleanup):
4545   //
4546   // vector.body:
4547   //  ...
4548   //  %5 = add nsw <2 x i32> %4, %wide.load
4549   //  %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
4550   //  %9 = extractelement <2 x i1> %8, i32 0
4551   //  br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
4552   //
4553   // pred.sdiv.if:
4554   //  %10 = extractelement <2 x i32> %wide.load, i32 0
4555   //  %11 = extractelement <2 x i32> %wide.load51, i32 0
4556   //  %12 = sdiv i32 %10, %11
4557   //  %13 = insertelement <2 x i32> undef, i32 %12, i32 0
4558   //  br label %pred.sdiv.continue
4559   //
4560   // pred.sdiv.continue:
4561   //  %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
4562   //  %15 = extractelement <2 x i1> %8, i32 1
4563   //  br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
4564   //
4565   // pred.sdiv.if54:
4566   //  %16 = extractelement <2 x i32> %wide.load, i32 1
4567   //  %17 = extractelement <2 x i32> %wide.load51, i32 1
4568   //  %18 = sdiv i32 %16, %17
4569   //  %19 = insertelement <2 x i32> %14, i32 %18, i32 1
4570   //  br label %pred.sdiv.continue55
4571   //
4572   // pred.sdiv.continue55:
4573   //  %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
4574   //  %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5
4575 
4576   for (auto KV : PredicatedInstructions) {
4577     BasicBlock::iterator I(KV.first);
4578     BasicBlock *Head = I->getParent();
4579     auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
4580     auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
4581                                         /*BranchWeights=*/nullptr, DT, LI);
4582     I->moveBefore(T);
4583     sinkScalarOperands(&*I);
4584 
4585     I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
4586     BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");
4587 
4588     // If the instruction is non-void create a Phi node at reconvergence point.
4589     if (!I->getType()->isVoidTy()) {
4590       Value *IncomingTrue = nullptr;
4591       Value *IncomingFalse = nullptr;
4592 
4593       if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
4594         // If the predicated instruction is feeding an insert-element, move it
4595         // into the Then block; Phi node will be created for the vector.
4596         InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
4597         IEI->moveBefore(T);
4598         IncomingTrue = IEI; // the new vector with the inserted element.
4599         IncomingFalse = IEI->getOperand(0); // the unmodified vector
4600       } else {
4601         // Phi node will be created for the scalar predicated instruction.
4602         IncomingTrue = &*I;
4603         IncomingFalse = UndefValue::get(I->getType());
4604       }
4605 
4606       BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
4607       assert(PostDom && "Then block has multiple successors");
4608       PHINode *Phi =
4609           PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
4610       IncomingTrue->replaceAllUsesWith(Phi);
4611       Phi->addIncoming(IncomingFalse, Head);
4612       Phi->addIncoming(IncomingTrue, I->getParent());
4613     }
4614   }
4615 
4616   DEBUG(DT->verifyDomTree());
4617 }
4618 
4619 InnerLoopVectorizer::VectorParts
4620 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4621   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
4622 
4623   // Look for cached value.
4624   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4625   EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4626   if (ECEntryIt != MaskCache.end())
4627     return ECEntryIt->second;
4628 
4629   VectorParts SrcMask = createBlockInMask(Src);
4630 
4631   // The terminator has to be a branch inst!
4632   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4633   assert(BI && "Unexpected terminator found");
4634 
4635   if (BI->isConditional()) {
4636     VectorParts EdgeMask = getVectorValue(BI->getCondition());
4637 
4638     if (BI->getSuccessor(0) != Dst)
4639       for (unsigned part = 0; part < UF; ++part)
4640         EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4641 
4642     for (unsigned part = 0; part < UF; ++part)
4643       EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4644 
4645     MaskCache[Edge] = EdgeMask;
4646     return EdgeMask;
4647   }
4648 
4649   MaskCache[Edge] = SrcMask;
4650   return SrcMask;
4651 }
4652 
4653 InnerLoopVectorizer::VectorParts
4654 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4655   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4656 
4657   // Loop incoming mask is all-one.
4658   if (OrigLoop->getHeader() == BB) {
4659     Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4660     return getVectorValue(C);
4661   }
4662 
  // This is the block mask. We OR all incoming edge masks together, starting
  // with zero.
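  // For example, with predecessors P1 and P2 (illustrative):
  //   BlockMask = 0 | EdgeMask(P1, BB) | EdgeMask(P2, BB)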
4664   Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4665   VectorParts BlockMask = getVectorValue(Zero);
4666 
4667   // For each pred:
4668   for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4669     VectorParts EM = createEdgeMask(*it, BB);
4670     for (unsigned part = 0; part < UF; ++part)
4671       BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4672   }
4673 
4674   return BlockMask;
4675 }
4676 
4677 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4678                                               unsigned VF) {
4679   PHINode *P = cast<PHINode>(PN);
4680   // In order to support recurrences we need to be able to vectorize Phi nodes.
4681   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4682   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4683   // this value when we vectorize all of the instructions that use the PHI.
4684   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4685     VectorParts Entry(UF);
4686     for (unsigned part = 0; part < UF; ++part) {
4687       // This is phase one of vectorizing PHIs.
4688       Type *VecTy =
4689           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4690       Entry[part] = PHINode::Create(
4691           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4692     }
4693     VectorLoopValueMap.initVector(P, Entry);
4694     return;
4695   }
4696 
4697   setDebugLocFromInst(Builder, P);
4698   // Check for PHI nodes that are lowered to vector selects.
4699   if (P->getParent() != OrigLoop->getHeader()) {
4700     // We know that all PHIs in non-header blocks are converted into
4701     // selects, so we don't have to worry about the insertion order and we
4702     // can just use the builder.
4703     // At this point we generate the predication tree. There may be
4704     // duplications since this is a simple recursive scan, but future
4705     // optimizations will clean it up.
4706 
4707     unsigned NumIncoming = P->getNumIncomingValues();
4708 
4709     // Generate a sequence of selects of the form:
4710     // SELECT(Mask3, In3,
4711     //      SELECT(Mask2, In2,
4712     //                   ( ...)))
4713     VectorParts Entry(UF);
4714     for (unsigned In = 0; In < NumIncoming; In++) {
4715       VectorParts Cond =
4716           createEdgeMask(P->getIncomingBlock(In), P->getParent());
4717       const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4718 
4719       for (unsigned part = 0; part < UF; ++part) {
        // We might have single-edge PHIs (blocks with one predecessor); use
        // an identity 'select' for the first PHI operand.
4722         if (In == 0)
4723           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4724         else
4725           // Select between the current value and the previous incoming edge
4726           // based on the incoming mask.
4727           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
4728                                              "predphi");
4729       }
4730     }
4731     VectorLoopValueMap.initVector(P, Entry);
4732     return;
4733   }
4734 
4735   // This PHINode must be an induction variable.
4736   // Make sure that we know about it.
4737   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4738 
4739   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4740   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4741 
4742   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4743   // which can be found from the original scalar operations.
4744   switch (II.getKind()) {
4745   case InductionDescriptor::IK_NoInduction:
4746     llvm_unreachable("Unknown induction");
4747   case InductionDescriptor::IK_IntInduction:
4748   case InductionDescriptor::IK_FpInduction:
4749     return widenIntOrFpInduction(P);
4750   case InductionDescriptor::IK_PtrInduction: {
4751     // Handle the pointer induction variable case.
4752     assert(P->getType()->isPointerTy() && "Unexpected type.");
4753     // This is the normalized GEP that starts counting at zero.
4754     Value *PtrInd = Induction;
4755     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4756     // Determine the number of scalars we need to generate for each unroll
4757     // iteration. If the instruction is uniform, we only need to generate the
4758     // first lane. Otherwise, we generate all VF values.
4759     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4760     // These are the scalar results. Notice that we don't generate vector GEPs
4761     // because scalar GEPs result in better code.
4762     ScalarParts Entry(UF);
4763     for (unsigned Part = 0; Part < UF; ++Part) {
4764       Entry[Part].resize(VF);
4765       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4766         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4767         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4768         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4769         SclrGep->setName("next.gep");
4770         Entry[Part][Lane] = SclrGep;
4771       }
4772     }
4773     VectorLoopValueMap.initScalar(P, Entry);
4774     return;
4775   }
4776   }
4777 }
4778 
4779 /// A helper function for checking whether an integer division-related
4780 /// instruction may divide by zero (in which case it must be predicated if
4781 /// executed conditionally in the scalar code).
4782 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4784 /// converted into multiplication, so we will still end up scalarizing
4785 /// the division, but can do so w/o predication.
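/// For example, 'udiv i32 %x, 7' can never divide by zero and needs no
/// predication, whereas 'udiv i32 %x, %d' may divide by zero and must be
/// predicated if executed conditionally (illustrative shorthand IR).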
4786 static bool mayDivideByZero(Instruction &I) {
4787   assert((I.getOpcode() == Instruction::UDiv ||
4788           I.getOpcode() == Instruction::SDiv ||
4789           I.getOpcode() == Instruction::URem ||
4790           I.getOpcode() == Instruction::SRem) &&
4791          "Unexpected instruction");
4792   Value *Divisor = I.getOperand(1);
4793   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4794   return !CInt || CInt->isZero();
4795 }
4796 
4797 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB) {
4798   // For each instruction in the old loop.
4799   for (Instruction &I : *BB) {
4800 
4801     // If the instruction will become trivially dead when vectorized, we don't
4802     // need to generate it.
4803     if (DeadInstructions.count(&I))
4804       continue;
4805 
4806     // Scalarize instructions that should remain scalar after vectorization.
4807     if (VF > 1 &&
4808         !(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
4809           isa<DbgInfoIntrinsic>(&I)) &&
4810         shouldScalarizeInstruction(&I)) {
4811       scalarizeInstruction(&I, Legal->isScalarWithPredication(&I));
4812       continue;
4813     }
4814 
4815     switch (I.getOpcode()) {
4816     case Instruction::Br:
4817       // Nothing to do for PHIs and BR, since we already took care of the
4818       // loop control flow instructions.
4819       continue;
4820     case Instruction::PHI: {
4821       // Vectorize PHINodes.
4822       widenPHIInstruction(&I, UF, VF);
4823       continue;
4824     } // End of PHI.
4825 
4826     case Instruction::UDiv:
4827     case Instruction::SDiv:
4828     case Instruction::SRem:
4829     case Instruction::URem:
4830       // Scalarize with predication if this instruction may divide by zero and
4831       // block execution is conditional, otherwise fallthrough.
4832       if (Legal->isScalarWithPredication(&I)) {
4833         scalarizeInstruction(&I, true);
4834         continue;
4835       }
4836     case Instruction::Add:
4837     case Instruction::FAdd:
4838     case Instruction::Sub:
4839     case Instruction::FSub:
4840     case Instruction::Mul:
4841     case Instruction::FMul:
4842     case Instruction::FDiv:
4843     case Instruction::FRem:
4844     case Instruction::Shl:
4845     case Instruction::LShr:
4846     case Instruction::AShr:
4847     case Instruction::And:
4848     case Instruction::Or:
4849     case Instruction::Xor: {
4850       // Just widen binops.
4851       auto *BinOp = cast<BinaryOperator>(&I);
4852       setDebugLocFromInst(Builder, BinOp);
4853       const VectorParts &A = getVectorValue(BinOp->getOperand(0));
4854       const VectorParts &B = getVectorValue(BinOp->getOperand(1));
4855 
4856       // Use this vector value for all users of the original instruction.
4857       VectorParts Entry(UF);
4858       for (unsigned Part = 0; Part < UF; ++Part) {
4859         Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
4860 
4861         if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4862           VecOp->copyIRFlags(BinOp);
4863 
4864         Entry[Part] = V;
4865       }
4866 
4867       VectorLoopValueMap.initVector(&I, Entry);
4868       addMetadata(Entry, BinOp);
4869       break;
4870     }
4871     case Instruction::Select: {
4872       // Widen selects.
4873       // If the selector is loop invariant we can create a select
4874       // instruction with a scalar condition. Otherwise, use vector-select.
4875       auto *SE = PSE.getSE();
4876       bool InvariantCond =
4877           SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4878       setDebugLocFromInst(Builder, &I);
4879 
      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // InstCombine will make this a no-op.
4884       const VectorParts &Cond = getVectorValue(I.getOperand(0));
4885       const VectorParts &Op0 = getVectorValue(I.getOperand(1));
4886       const VectorParts &Op1 = getVectorValue(I.getOperand(2));
4887 
4888       auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);
4889 
4890       VectorParts Entry(UF);
4891       for (unsigned Part = 0; Part < UF; ++Part) {
4892         Entry[Part] = Builder.CreateSelect(
4893             InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
4894       }
4895 
4896       VectorLoopValueMap.initVector(&I, Entry);
4897       addMetadata(Entry, &I);
4898       break;
4899     }
4900 
4901     case Instruction::ICmp:
4902     case Instruction::FCmp: {
4903       // Widen compares. Generate vector compares.
4904       bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = cast<CmpInst>(&I);
4906       setDebugLocFromInst(Builder, Cmp);
4907       const VectorParts &A = getVectorValue(Cmp->getOperand(0));
4908       const VectorParts &B = getVectorValue(Cmp->getOperand(1));
4909       VectorParts Entry(UF);
4910       for (unsigned Part = 0; Part < UF; ++Part) {
4911         Value *C = nullptr;
4912         if (FCmp) {
4913           C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4914           cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
4915         } else {
4916           C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4917         }
4918         Entry[Part] = C;
4919       }
4920 
4921       VectorLoopValueMap.initVector(&I, Entry);
4922       addMetadata(Entry, &I);
4923       break;
4924     }
4925 
4926     case Instruction::Store:
4927     case Instruction::Load:
4928       vectorizeMemoryInstruction(&I);
4929       break;
4930     case Instruction::ZExt:
4931     case Instruction::SExt:
4932     case Instruction::FPToUI:
4933     case Instruction::FPToSI:
4934     case Instruction::FPExt:
4935     case Instruction::PtrToInt:
4936     case Instruction::IntToPtr:
4937     case Instruction::SIToFP:
4938     case Instruction::UIToFP:
4939     case Instruction::Trunc:
4940     case Instruction::FPTrunc:
4941     case Instruction::BitCast: {
      auto *CI = cast<CastInst>(&I);
4943       setDebugLocFromInst(Builder, CI);
4944 
4945       // Optimize the special case where the source is a constant integer
4946       // induction variable. Notice that we can only optimize the 'trunc' case
4947       // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
4948       // (c) other casts depend on pointer size.
4949       if (Cost->isOptimizableIVTruncate(CI, VF)) {
4950         widenIntOrFpInduction(cast<PHINode>(CI->getOperand(0)),
4951                               cast<TruncInst>(CI));
4952         break;
4953       }
4954 
      // Vectorize casts.
4956       Type *DestTy =
4957           (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4958 
4959       const VectorParts &A = getVectorValue(CI->getOperand(0));
4960       VectorParts Entry(UF);
4961       for (unsigned Part = 0; Part < UF; ++Part)
4962         Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4963       VectorLoopValueMap.initVector(&I, Entry);
4964       addMetadata(Entry, &I);
4965       break;
4966     }
4967 
4968     case Instruction::Call: {
4969       // Ignore dbg intrinsics.
4970       if (isa<DbgInfoIntrinsic>(I))
4971         break;
4972       setDebugLocFromInst(Builder, &I);
4973 
4974       Module *M = BB->getParent()->getParent();
4975       auto *CI = cast<CallInst>(&I);
4976 
      Function *F = CI->getCalledFunction();
      StringRef FnName = F->getName();
4979       Type *RetTy = ToVectorTy(CI->getType(), VF);
4980       SmallVector<Type *, 4> Tys;
4981       for (Value *ArgOperand : CI->arg_operands())
4982         Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4983 
4984       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4985       if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4986                  ID == Intrinsic::lifetime_start)) {
4987         scalarizeInstruction(&I);
4988         break;
4989       }
      // Decide whether to use an intrinsic or a library call for the
      // vectorized version of the instruction: is the intrinsic call
      // cheaper than the library call?
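      // For example (illustration), a 'sinf' call may map both to the
      // 'llvm.sin.f32' intrinsic and, through TLI, to a vector library
      // routine; whichever the cost model reports as cheaper wins, and we
      // scalarize when neither is profitable.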
4993       bool NeedToScalarize;
4994       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4995       bool UseVectorIntrinsic =
4996           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4997       if (!UseVectorIntrinsic && NeedToScalarize) {
4998         scalarizeInstruction(&I);
4999         break;
5000       }
5001 
5002       VectorParts Entry(UF);
5003       for (unsigned Part = 0; Part < UF; ++Part) {
5004         SmallVector<Value *, 4> Args;
5005         for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
5006           Value *Arg = CI->getArgOperand(i);
5007           // Some intrinsics have a scalar argument - don't replace it with a
5008           // vector.
5009           if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
5010             const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
5011             Arg = VectorArg[Part];
5012           }
5013           Args.push_back(Arg);
5014         }
5015 
5016         Function *VectorF;
5017         if (UseVectorIntrinsic) {
5018           // Use vector version of the intrinsic.
5019           Type *TysForDecl[] = {CI->getType()};
5020           if (VF > 1)
5021             TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5022           VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5023         } else {
5024           // Use vector version of the library call.
5025           StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
5026           assert(!VFnName.empty() && "Vector function name is empty.");
5027           VectorF = M->getFunction(VFnName);
5028           if (!VectorF) {
5029             // Generate a declaration
5030             FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
5031             VectorF =
5032                 Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
5033             VectorF->copyAttributesFrom(F);
5034           }
5035         }
5036         assert(VectorF && "Can't create vector function.");
5037 
5038         SmallVector<OperandBundleDef, 1> OpBundles;
5039         CI->getOperandBundlesAsDefs(OpBundles);
5040         CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
5041 
5042         if (isa<FPMathOperator>(V))
5043           V->copyFastMathFlags(CI);
5044 
5045         Entry[Part] = V;
5046       }
5047 
5048       VectorLoopValueMap.initVector(&I, Entry);
5049       addMetadata(Entry, &I);
5050       break;
5051     }
5052 
5053     default:
5054       // All other instructions are unsupported. Scalarize them.
5055       scalarizeInstruction(&I);
5056       break;
5057     } // end of switch.
5058   }   // end of for_each instr.
5059 }
5060 
5061 void InnerLoopVectorizer::updateAnalysis() {
5062   // Forget the original basic block.
5063   PSE.getSE()->forgetLoop(OrigLoop);
5064 
5065   // Update the dominator tree information.
5066   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
5067          "Entry does not dominate exit.");
5068 
5069   // We don't predicate stores by this point, so the vector body should be a
5070   // single loop.
5071   DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
5072 
5073   DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
5074   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
5075   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
5076   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
5077 
5078   DEBUG(DT->verifyDomTree());
5079 }
5080 
5081 /// \brief Check whether it is safe to if-convert this phi node.
5082 ///
/// PHI nodes with constant expressions that can trap are not safe to
/// if-convert.
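/// For example (illustration), an incoming value such as the constant
/// expression 'sdiv (i32 1, i32 0)' could trap if the select created by
/// if-conversion caused it to be speculated.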
5085 static bool canIfConvertPHINodes(BasicBlock *BB) {
5086   for (Instruction &I : *BB) {
5087     auto *Phi = dyn_cast<PHINode>(&I);
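    // PHI nodes are grouped at the top of a basic block, so the first
    // non-PHI instruction means there are no more PHIs to inspect.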
5088     if (!Phi)
5089       return true;
5090     for (Value *V : Phi->incoming_values())
5091       if (auto *C = dyn_cast<Constant>(V))
5092         if (C->canTrap())
5093           return false;
5094   }
5095   return true;
5096 }
5097 
5098 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
5099   if (!EnableIfConversion) {
5100     ORE->emit(createMissedAnalysis("IfConversionDisabled")
5101               << "if-conversion is disabled");
5102     return false;
5103   }
5104 
5105   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
5106 
  // A list of pointers that we can safely read from and write to.
  SmallPtrSet<Value *, 8> SafePointers;
5109 
5110   // Collect safe addresses.
5111   for (BasicBlock *BB : TheLoop->blocks()) {
5112     if (blockNeedsPredication(BB))
5113       continue;
5114 
5115     for (Instruction &I : *BB)
5116       if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
5118   }
5119 
5120   // Collect the blocks that need predication.
5121   BasicBlock *Header = TheLoop->getHeader();
5122   for (BasicBlock *BB : TheLoop->blocks()) {
5123     // We don't support switch statements inside loops.
5124     if (!isa<BranchInst>(BB->getTerminator())) {
5125       ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5126                 << "loop contains a switch statement");
5127       return false;
5128     }
5129 
5130     // We must be able to predicate all blocks that need to be predicated.
5131     if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
5133         ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5134                   << "control flow cannot be substituted for a select");
5135         return false;
5136       }
5137     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5138       ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5139                 << "control flow cannot be substituted for a select");
5140       return false;
5141     }
5142   }
5143 
5144   // We can if-convert this loop.
5145   return true;
5146 }
5147 
5148 bool LoopVectorizationLegality::canVectorize() {
5149   // We must have a loop in canonical form. Loops with indirectbr in them cannot
5150   // be canonicalized.
5151   if (!TheLoop->getLoopPreheader()) {
5152     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5153               << "loop control flow is not understood by vectorizer");
5154     return false;
5155   }
5156 
  // FIXME: This code is currently dead, since any loop that reaches
  // LoopVectorizationLegality is already an innermost loop.
5159   //
5160   // We can only vectorize innermost loops.
5161   if (!TheLoop->empty()) {
5162     ORE->emit(createMissedAnalysis("NotInnermostLoop")
5163               << "loop is not the innermost loop");
5164     return false;
5165   }
5166 
5167   // We must have a single backedge.
5168   if (TheLoop->getNumBackEdges() != 1) {
5169     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5170               << "loop control flow is not understood by vectorizer");
5171     return false;
5172   }
5173 
5174   // We must have a single exiting block.
5175   if (!TheLoop->getExitingBlock()) {
5176     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5177               << "loop control flow is not understood by vectorizer");
5178     return false;
5179   }
5180 
  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
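  // For example (illustration), a rotated 'for' loop that tests its
  // condition in the latch qualifies, while an unrotated 'while' loop whose
  // exit test sits in the header does not.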
5184   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5185     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5186               << "loop control flow is not understood by vectorizer");
5187     return false;
5188   }
5189 
  // Report the loop we are considering.
5191   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5192                << '\n');
5193 
5194   // Check if we can if-convert non-single-bb loops.
5195   unsigned NumBlocks = TheLoop->getNumBlocks();
5196   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5197     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5198     return false;
5199   }
5200 
5201   // ScalarEvolution needs to be able to find the exit count.
5202   const SCEV *ExitCount = PSE.getBackedgeTakenCount();
5203   if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
5204     ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
5205               << "could not determine number of loop iterations");
5206     DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
5207     return false;
5208   }
5209 
5210   // Check if we can vectorize the instructions and CFG in this loop.
5211   if (!canVectorizeInstrs()) {
5212     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5213     return false;
5214   }
5215 
5216   // Go over each instruction and look at memory deps.
5217   if (!canVectorizeMemory()) {
5218     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5219     return false;
5220   }
5221 
5222   DEBUG(dbgs() << "LV: We can vectorize this loop"
5223                << (LAI->getRuntimePointerChecking()->Need
5224                        ? " (with a runtime bound check)"
5225                        : "")
5226                << "!\n");
5227 
5228   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5229 
5230   // If an override option has been passed in for interleaved accesses, use it.
5231   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5232     UseInterleaved = EnableInterleavedMemAccesses;
5233 
5234   // Analyze interleaved memory accesses.
5235   if (UseInterleaved)
5236     InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5237 
5238   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5239   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5240     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5241 
5242   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5243     ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5244               << "Too many SCEV assumptions need to be made and checked "
5245               << "at runtime");
5246     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5247     return false;
5248   }
5249 
5250   // Okay! We can vectorize. At this point we don't have any other mem analysis
5251   // which may limit our maximum vectorization factor, so just return true with
5252   // no restrictions.
5253   return true;
5254 }
5255 
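/// Return an integer type suitable for trip-count arithmetic: pointers are
/// converted to their integer equivalent, and integers narrower than 32 bits
/// are widened. For example (illustration), an 'i8' induction counting a
/// 'char' loop is returned as 'i32' so the trip count cannot overflow.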
5256 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5257   if (Ty->isPointerTy())
5258     return DL.getIntPtrType(Ty);
5259 
  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by changing the type size.
5262   if (Ty->getScalarSizeInBits() < 32)
5263     return Type::getInt32Ty(Ty->getContext());
5264 
5265   return Ty;
5266 }
5267 
5268 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5269   Ty0 = convertPointerToIntegerType(DL, Ty0);
5270   Ty1 = convertPointerToIntegerType(DL, Ty1);
5271   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5272     return Ty0;
5273   return Ty1;
5274 }
5275 
/// \brief Check whether the instruction has users outside the loop and is
/// not an identified reduction or induction (allowed-exit) value.
5278 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5279                                SmallPtrSetImpl<Value *> &AllowedExit) {
5280   // Reduction and Induction instructions are allowed to have exit users. All
5281   // other instructions must not have external users.
5282   if (!AllowedExit.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
5284     for (User *U : Inst->users()) {
5285       Instruction *UI = cast<Instruction>(U);
5286       // This user may be a reduction exit value.
5287       if (!TheLoop->contains(UI)) {
5288         DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
5289         return true;
5290       }
5291     }
5292   return false;
5293 }
5294 
5295 void LoopVectorizationLegality::addInductionPhi(
5296     PHINode *Phi, const InductionDescriptor &ID,
5297     SmallPtrSetImpl<Value *> &AllowedExit) {
5298   Inductions[Phi] = ID;
5299   Type *PhiTy = Phi->getType();
5300   const DataLayout &DL = Phi->getModule()->getDataLayout();
5301 
5302   // Get the widest type.
5303   if (!PhiTy->isFloatingPointTy()) {
5304     if (!WidestIndTy)
5305       WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5306     else
5307       WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5308   }
5309 
  // Integer inductions are special: only one can be the primary induction.
5311   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5312       ID.getConstIntStepValue() &&
5313       ID.getConstIntStepValue()->isOne() &&
5314       isa<Constant>(ID.getStartValue()) &&
5315       cast<Constant>(ID.getStartValue())->isNullValue()) {
5316 
5317     // Use the phi node with the widest type as induction. Use the last
5318     // one if there are multiple (no good reason for doing this other
5319     // than it is expedient). We've checked that it begins at zero and
5320     // steps by one, so this is a canonical induction variable.
5321     if (!PrimaryInduction || PhiTy == WidestIndTy)
5322       PrimaryInduction = Phi;
5323   }
5324 
5325   // Both the PHI node itself, and the "post-increment" value feeding
5326   // back into the PHI node may have external users.
5327   AllowedExit.insert(Phi);
5328   AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5329 
5330   DEBUG(dbgs() << "LV: Found an induction variable.\n");
5332 }
5333 
5334 bool LoopVectorizationLegality::canVectorizeInstrs() {
5335   BasicBlock *Header = TheLoop->getHeader();
5336 
5337   // Look for the attribute signaling the absence of NaNs.
5338   Function &F = *Header->getParent();
5339   HasFunNoNaNAttr =
5340       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5341 
5342   // For each block in the loop.
5343   for (BasicBlock *BB : TheLoop->blocks()) {
5344     // Scan the instructions in the block and look for hazards.
5345     for (Instruction &I : *BB) {
5346       if (auto *Phi = dyn_cast<PHINode>(&I)) {
5347         Type *PhiTy = Phi->getType();
5348         // Check that this PHI type is allowed.
5349         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5350             !PhiTy->isPointerTy()) {
5351           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5352                     << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int, non-FP, non-pointer PHI.\n");
5354           return false;
5355         }
5356 
5357         // If this PHINode is not in the header block, then we know that we
5358         // can convert it to select during if-conversion. No need to check if
5359         // the PHIs in this block are induction or reduction variables.
5360         if (BB != Header) {
5361           // Check that this instruction has no outside users or is an
5362           // identified reduction value with an outside user.
5363           if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5364             continue;
5365           ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5366                     << "value could not be identified as "
5367                        "an induction or reduction variable");
5368           return false;
5369         }
5370 
5371         // We only allow if-converted PHIs with exactly two incoming values.
5372         if (Phi->getNumIncomingValues() != 2) {
5373           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "loop control flow is not understood by vectorizer");
5375           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5376           return false;
5377         }
5378 
5379         RecurrenceDescriptor RedDes;
5380         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5381           if (RedDes.hasUnsafeAlgebra())
5382             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5383           AllowedExit.insert(RedDes.getLoopExitInstr());
5384           Reductions[Phi] = RedDes;
5385           continue;
5386         }
5387 
5388         InductionDescriptor ID;
5389         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5390           addInductionPhi(Phi, ID, AllowedExit);
5391           if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5392             Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5393           continue;
5394         }
5395 
5396         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5397           FirstOrderRecurrences.insert(Phi);
5398           continue;
5399         }
5400 
        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
5403         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5404           addInductionPhi(Phi, ID, AllowedExit);
5405           continue;
5406         }
5407 
5408         ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5409                   << "value that could not be identified as "
5410                      "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI: " << *Phi << "\n");
5412         return false;
5413       } // end of PHI handling
5414 
5415       // We handle calls that:
5416       //   * Are debug info intrinsics.
5417       //   * Have a mapping to an IR intrinsic.
5418       //   * Have a vector version available.
5419       auto *CI = dyn_cast<CallInst>(&I);
5420       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5421           !isa<DbgInfoIntrinsic>(CI) &&
5422           !(CI->getCalledFunction() && TLI &&
5423             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5424         ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5425                   << "call instruction cannot be vectorized");
5426         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5427         return false;
5428       }
5429 
      // Intrinsics such as powi, cttz, and ctlz are legal to vectorize only
      // if the second argument is loop invariant.
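      // For example, 'llvm.powi' takes its exponent as a scalar i32, so a
      // call like powi(x[i], k) is vectorizable only when 'k' is the same
      // for every iteration.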
5432       if (CI && hasVectorInstrinsicScalarOpd(
5433                     getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5434         auto *SE = PSE.getSE();
5435         if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5436           ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5437                     << "intrinsic instruction cannot be vectorized");
5438           DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5439           return false;
5440         }
5441       }
5442 
5443       // Check that the instruction return type is vectorizable.
5444       // Also, we can't vectorize extractelement instructions.
5445       if ((!VectorType::isValidElementType(I.getType()) &&
5446            !I.getType()->isVoidTy()) ||
5447           isa<ExtractElementInst>(I)) {
5448         ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5449                   << "instruction return type cannot be vectorized");
5450         DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5451         return false;
5452       }
5453 
5454       // Check that the stored type is vectorizable.
5455       if (auto *ST = dyn_cast<StoreInst>(&I)) {
5456         Type *T = ST->getValueOperand()->getType();
5457         if (!VectorType::isValidElementType(T)) {
5458           ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5459                     << "store instruction cannot be vectorized");
5460           return false;
5461         }
5462 
        // FP instructions can allow unsafe algebra, and are thus vectorizable
        // by non-IEEE-754-compliant SIMD units.
        // This applies to floating-point math operations and calls, not to
        // memory operations, shuffles, or casts, as those don't change
        // precision or semantics.
5468       } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5469                  !I.hasUnsafeAlgebra()) {
5470         DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
5471         Hints->setPotentiallyUnsafe();
5472       }
5473 
5474       // Reduction instructions are allowed to have exit users.
5475       // All other instructions must not have external users.
5476       if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5477         ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5478                   << "value cannot be used outside the loop");
5479         return false;
5480       }
5481 
5482     } // next instr.
5483   }
5484 
5485   if (!PrimaryInduction) {
    DEBUG(dbgs() << "LV: Did not find a primary integer induction var.\n");
5487     if (Inductions.empty()) {
5488       ORE->emit(createMissedAnalysis("NoInductionVariable")
5489                 << "loop induction variable could not be identified");
5490       return false;
5491     }
5492   }
5493 
5494   // Now we know the widest induction type, check if our found induction
5495   // is the same size. If it's not, unset it here and InnerLoopVectorizer
5496   // will create another.
5497   if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
5498     PrimaryInduction = nullptr;
5499 
5500   return true;
5501 }
5502 
void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF = 1 does not make any sense.
5509 
5510   assert(VF >= 2 && !Scalars.count(VF) &&
5511          "This function should not be visited twice for the same VF");
5512 
5513   // If an instruction is uniform after vectorization, it will remain scalar.
5514   Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
5515 
5516   // Collect the getelementptr instructions that will not be vectorized. A
5517   // getelementptr instruction is only vectorized if it is used for a legal
5518   // gather or scatter operation.
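  // For example (illustration), a GEP feeding an unconditional consecutive
  // load stays scalar, since the widened load needs only a single pointer,
  // whereas a GEP feeding an access we chose to gather/scatter must produce
  // a vector of pointers and is removed from Scalars below.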
5519   for (auto *BB : TheLoop->blocks())
5520     for (auto &I : *BB) {
5521       if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5522         Scalars[VF].insert(GEP);
5523         continue;
5524       }
5525       auto *Ptr = getPointerOperand(&I);
5526       if (!Ptr)
5527         continue;
5528       auto *GEP = getGEPInstruction(Ptr);
5529       if (GEP && getWideningDecision(&I, VF) == CM_GatherScatter)
5530         Scalars[VF].erase(GEP);
5531     }
5532 
5533   // An induction variable will remain scalar if all users of the induction
5534   // variable and induction variable update remain scalar.
5535   auto *Latch = TheLoop->getLoopLatch();
5536   for (auto &Induction : *Legal->getInductionVars()) {
5537     auto *Ind = Induction.first;
5538     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5539 
5540     // Determine if all users of the induction variable are scalar after
5541     // vectorization.
5542     auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5543       auto *I = cast<Instruction>(U);
5544       return I == IndUpdate || !TheLoop->contains(I) || Scalars[VF].count(I);
5545     });
5546     if (!ScalarInd)
5547       continue;
5548 
5549     // Determine if all users of the induction variable update instruction are
5550     // scalar after vectorization.
5551     auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5552       auto *I = cast<Instruction>(U);
5553       return I == Ind || !TheLoop->contains(I) || Scalars[VF].count(I);
5554     });
5555     if (!ScalarIndUpdate)
5556       continue;
5557 
5558     // The induction variable and its update instruction will remain scalar.
5559     Scalars[VF].insert(Ind);
5560     Scalars[VF].insert(IndUpdate);
5561   }
5562 }
5563 
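/// Returns true if \p I must be scalarized and predicated when vectorizing.
/// For example (illustration), a 'udiv' in a conditionally executed block
/// cannot simply be widened: executing it unconditionally for masked-off
/// lanes could divide by zero.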
5564 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5565   if (!blockNeedsPredication(I->getParent()))
5566     return false;
  switch (I->getOpcode()) {
5568   default:
5569     break;
5570   case Instruction::Store:
5571     return !isMaskRequired(I);
5572   case Instruction::UDiv:
5573   case Instruction::SDiv:
5574   case Instruction::SRem:
5575   case Instruction::URem:
5576     return mayDivideByZero(*I);
5577   }
5578   return false;
5579 }
5580 
5581 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5582                                                               unsigned VF) {
5583   // Get and ensure we have a valid memory instruction.
5584   LoadInst *LI = dyn_cast<LoadInst>(I);
5585   StoreInst *SI = dyn_cast<StoreInst>(I);
5586   assert((LI || SI) && "Invalid memory instruction");
5587 
5588   auto *Ptr = getPointerOperand(I);
5589 
  // In order to be widened, the pointer must first of all be consecutive.
5591   if (!isConsecutivePtr(Ptr))
5592     return false;
5593 
5594   // If the instruction is a store located in a predicated block, it will be
5595   // scalarized.
5596   if (isScalarWithPredication(I))
5597     return false;
5598 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
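  // For example (illustration), an 'x86_fp80' has an 80-bit type size but a
  // larger allocated size, so consecutive scalar accesses to it are not
  // contiguous and cannot be combined into one wide access.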
5601   auto &DL = I->getModule()->getDataLayout();
5602   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5603   if (hasIrregularType(ScalarTy, DL, VF))
5604     return false;
5605 
5606   return true;
5607 }
5608 
void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Uniforms for VF = 1 does not make any sense.
5615 
5616   assert(VF >= 2 && !Uniforms.count(VF) &&
5617          "This function should not be visited twice for the same VF");
5618 
  // Visit the list of Uniforms. Even if we don't find any uniform value, we
  // won't analyze this VF again: Uniforms.count(VF) will then return 1.
5621   Uniforms[VF].clear();
5622 
5623   // We now know that the loop is vectorizable!
5624   // Collect instructions inside the loop that will remain uniform after
5625   // vectorization.
5626 
  // Global values, params, and instructions outside of the current loop are
  // out of scope.
5629   auto isOutOfScope = [&](Value *V) -> bool {
5630     Instruction *I = dyn_cast<Instruction>(V);
5631     return (!I || !TheLoop->contains(I));
5632   };
5633 
5634   SetVector<Instruction *> Worklist;
5635   BasicBlock *Latch = TheLoop->getLoopLatch();
5636 
5637   // Start with the conditional branch. If the branch condition is an
5638   // instruction contained in the loop that is only used by the branch, it is
5639   // uniform.
5640   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5641   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5642     Worklist.insert(Cmp);
5643     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5644   }
5645 
5646   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5647   // are pointers that are treated like consecutive pointers during
5648   // vectorization. The pointer operands of interleaved accesses are an
5649   // example.
5650   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5651 
5652   // Holds pointer operands of instructions that are possibly non-uniform.
5653   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5654 
5655   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5656     InstWidening WideningDecision = getWideningDecision(I, VF);
5657     assert(WideningDecision != CM_Unknown &&
5658            "Widening decision should be ready at this moment");
5659 
5660     return (WideningDecision == CM_Widen ||
5661             WideningDecision == CM_Interleave);
5662   };
5663   // Iterate over the instructions in the loop, and collect all
5664   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5665   // that a consecutive-like pointer operand will be scalarized, we collect it
5666   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5667   // getelementptr instruction can be used by both vectorized and scalarized
5668   // memory instructions. For example, if a loop loads and stores from the same
5669   // location, but the store is conditional, the store will be scalarized, and
5670   // the getelementptr won't remain uniform.
5671   for (auto *BB : TheLoop->blocks())
5672     for (auto &I : *BB) {
5673 
5674       // If there's no pointer operand, there's nothing to do.
5675       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5676       if (!Ptr)
5677         continue;
5678 
5679       // True if all users of Ptr are memory accesses that have Ptr as their
5680       // pointer operand.
5681       auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5682         return getPointerOperand(U) == Ptr;
5683       });
5684 
5685       // Ensure the memory instruction will not be scalarized or used by
5686       // gather/scatter, making its pointer operand non-uniform. If the pointer
5687       // operand is used by any instruction other than a memory access, we
5688       // conservatively assume the pointer operand may be non-uniform.
5689       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5690         PossibleNonUniformPtrs.insert(Ptr);
5691 
      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like or part of an interleaved group, the pointer
      // operand should remain uniform.
5695       else
5696         ConsecutiveLikePtrs.insert(Ptr);
5697     }
5698 
5699   // Add to the Worklist all consecutive and consecutive-like pointers that
5700   // aren't also identified as possibly non-uniform.
5701   for (auto *V : ConsecutiveLikePtrs)
5702     if (!PossibleNonUniformPtrs.count(V)) {
5703       DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5704       Worklist.insert(V);
5705     }
5706 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
5711   unsigned idx = 0;
5712   while (idx != Worklist.size()) {
5713     Instruction *I = Worklist[idx++];
5714 
5715     for (auto OV : I->operand_values()) {
5716       if (isOutOfScope(OV))
5717         continue;
5718       auto *OI = cast<Instruction>(OV);
5719       if (all_of(OI->users(), [&](User *U) -> bool {
5720             auto *J = cast<Instruction>(U);
5721             return !TheLoop->contains(J) || Worklist.count(J) ||
5722                    (OI == getPointerOperand(J) && isUniformDecision(J, VF));
5723           })) {
5724         Worklist.insert(OI);
5725         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5726       }
5727     }
5728   }
5729 
5730   // Returns true if Ptr is the pointer operand of a memory access instruction
5731   // I, and I is known to not require scalarization.
5732   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5733     return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5734   };
5735 
5736   // For an instruction to be added into Worklist above, all its users inside
5737   // the loop should also be in Worklist. However, this condition cannot be
5738   // true for phi nodes that form a cyclic dependence. We must process phi
5739   // nodes separately. An induction variable will remain uniform if all users
5740   // of the induction variable and induction variable update remain uniform.
5741   // The code below handles both pointer and non-pointer induction variables.
5742   for (auto &Induction : *Legal->getInductionVars()) {
5743     auto *Ind = Induction.first;
5744     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5745 
5746     // Determine if all users of the induction variable are uniform after
5747     // vectorization.
5748     auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5749       auto *I = cast<Instruction>(U);
5750       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5751              isVectorizedMemAccessUse(I, Ind);
5752     });
5753     if (!UniformInd)
5754       continue;
5755 
5756     // Determine if all users of the induction variable update instruction are
5757     // uniform after vectorization.
5758     auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5759       auto *I = cast<Instruction>(U);
5760       return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5761              isVectorizedMemAccessUse(I, IndUpdate);
5762     });
5763     if (!UniformIndUpdate)
5764       continue;
5765 
5766     // The induction variable and its update instruction will remain uniform.
5767     Worklist.insert(Ind);
5768     Worklist.insert(IndUpdate);
5769     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
5770     DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
5771   }
5772 
5773   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5774 }
5775 
5776 bool LoopVectorizationLegality::canVectorizeMemory() {
5777   LAI = &(*GetLAA)(*TheLoop);
5778   InterleaveInfo.setLAI(LAI);
5779   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
5780   if (LAR) {
5781     OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
5782                                   "loop not vectorized: ", *LAR);
5783     ORE->emit(VR);
5784   }
5785   if (!LAI->canVectorizeMemory())
5786     return false;
5787 
5788   if (LAI->hasStoreToLoopInvariantAddress()) {
5789     ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
5790               << "write to a loop invariant address could not be vectorized");
5791     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5792     return false;
5793   }
5794 
5795   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5796   PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5797 
5798   return true;
5799 }
5800 
5801 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5802   Value *In0 = const_cast<Value *>(V);
5803   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5804   if (!PN)
5805     return false;
5806 
5807   return Inductions.count(PN);
5808 }
5809 
5810 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5811   return FirstOrderRecurrences.count(Phi);
5812 }
5813 
5814 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5815   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5816 }
5817 
5818 bool LoopVectorizationLegality::blockCanBePredicated(
5819     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5820   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5821 
5822   for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as an
    // operand.
5824     for (Value *Operand : I.operands()) {
5825       if (auto *C = dyn_cast<Constant>(Operand))
5826         if (C->canTrap())
5827           return false;
5828     }
5829     // We might be able to hoist the load.
5830     if (I.mayReadFromMemory()) {
5831       auto *LI = dyn_cast<LoadInst>(&I);
5832       if (!LI)
5833         return false;
5834       if (!SafePtrs.count(LI->getPointerOperand())) {
5835         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5836             isLegalMaskedGather(LI->getType())) {
5837           MaskedOp.insert(LI);
5838           continue;
5839         }
5840         // !llvm.mem.parallel_loop_access implies if-conversion safety.
5841         if (IsAnnotatedParallel)
5842           continue;
5843         return false;
5844       }
5845     }
5846 
5847     if (I.mayWriteToMemory()) {
5848       auto *SI = dyn_cast<StoreInst>(&I);
5849       // We only support predication of stores in basic blocks with one
5850       // predecessor.
5851       if (!SI)
5852         return false;
5853 
5854       // Build a masked store if it is legal for the target.
5855       if (isLegalMaskedStore(SI->getValueOperand()->getType(),
5856                              SI->getPointerOperand()) ||
5857           isLegalMaskedScatter(SI->getValueOperand()->getType())) {
5858         MaskedOp.insert(SI);
5859         continue;
5860       }
5861 
5862       bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
5863       bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
5864 
5865       if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
5866           !isSinglePredecessor)
5867         return false;
5868     }
5869     if (I.mayThrow())
5870       return false;
5871   }
5872 
5873   return true;
5874 }
5875 
5876 void InterleavedAccessInfo::collectConstStrideAccesses(
5877     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
5878     const ValueToValueMap &Strides) {
5879 
5880   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
5881 
5882   // Since it's desired that the load/store instructions be maintained in
5883   // "program order" for the interleaved access analysis, we have to visit the
5884   // blocks in the loop in reverse postorder (i.e., in a topological order).
5885   // Such an ordering will ensure that any load/store that may be executed
5886   // before a second load/store will precede the second load/store in
5887   // AccessStrideInfo.
5888   LoopBlocksDFS DFS(TheLoop);
5889   DFS.perform(LI);
5890   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
5891     for (auto &I : *BB) {
5892       auto *LI = dyn_cast<LoadInst>(&I);
5893       auto *SI = dyn_cast<StoreInst>(&I);
5894       if (!LI && !SI)
5895         continue;
5896 
5897       Value *Ptr = getPointerOperand(&I);
5898       // We don't check wrapping here because we don't know yet if Ptr will be
5899       // part of a full group or a group with gaps. Checking wrapping for all
5900       // pointers (even those that end up in groups with no gaps) will be overly
5901       // conservative. For full groups, wrapping should be ok since if we would
5902       // wrap around the address space we would do a memory access at nullptr
5903       // even without the transformation. The wrapping checks are therefore
5904       // deferred until after we've formed the interleaved groups.
5905       int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
5906                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
5907 
5908       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
5910       uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
5911 
5912       // An alignment of 0 means target ABI alignment.
5913       unsigned Align = getMemInstAlignment(&I);
5914       if (!Align)
5915         Align = DL.getABITypeAlignment(PtrTy->getElementType());
5916 
5917       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
5918     }
5919 }
5920 
5921 // Analyze interleaved accesses and collect them into interleaved load and
5922 // store groups.
5923 //
5924 // When generating code for an interleaved load group, we effectively hoist all
5925 // loads in the group to the location of the first load in program order. When
5926 // generating code for an interleaved store group, we sink all stores to the
5927 // location of the last store. This code motion can change the order of load
5928 // and store instructions and may break dependences.
5929 //
5930 // The code generation strategy mentioned above ensures that we won't violate
5931 // any write-after-read (WAR) dependences.
5932 //
5933 // E.g., for the WAR dependence:  a = A[i];      // (1)
5934 //                                A[i] = b;      // (2)
5935 //
5936 // The store group of (2) is always inserted at or below (2), and the load
5937 // group of (1) is always inserted at or above (1). Thus, the instructions will
5938 // never be reordered. All other dependences are checked to ensure the
5939 // correctness of the instruction reordering.
5940 //
5941 // The algorithm visits all memory accesses in the loop in bottom-up program
5942 // order. Program order is established by traversing the blocks in the loop in
5943 // reverse postorder when collecting the accesses.
5944 //
5945 // We visit the memory accesses in bottom-up order because it can simplify the
5946 // construction of store groups in the presence of write-after-write (WAW)
5947 // dependences.
5948 //
5949 // E.g., for the WAW dependence:  A[i] = a;      // (1)
5950 //                                A[i] = b;      // (2)
5951 //                                A[i + 1] = c;  // (3)
5952 //
5953 // We will first create a store group with (3) and (2). (1) can't be added to
5954 // this group because it and (2) are dependent. However, (1) can be grouped
5955 // with other accesses that may precede it in program order. Note that a
5956 // bottom-up order does not imply that WAW dependences should not be checked.
5957 void InterleavedAccessInfo::analyzeInterleaving(
5958     const ValueToValueMap &Strides) {
5959   DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
5960 
5961   // Holds all accesses with a constant stride.
5962   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
5963   collectConstStrideAccesses(AccessStrideInfo, Strides);
5964 
5965   if (AccessStrideInfo.empty())
5966     return;
5967 
5968   // Collect the dependences in the loop.
5969   collectDependences();
5970 
5971   // Holds all interleaved store groups temporarily.
5972   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
5973   // Holds all interleaved load groups temporarily.
5974   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
5975 
5976   // Search in bottom-up program order for pairs of accesses (A and B) that can
5977   // form interleaved load or store groups. In the algorithm below, access A
5978   // precedes access B in program order. We initialize a group for B in the
5979   // outer loop of the algorithm, and then in the inner loop, we attempt to
5980   // insert each A into B's group if:
5981   //
5982   //  1. A and B have the same stride,
5983   //  2. A and B have the same memory object size, and
5984   //  3. A belongs in B's group according to its distance from B.
5985   //
5986   // Special care is taken to ensure group formation will not break any
5987   // dependences.
5988   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
5989        BI != E; ++BI) {
5990     Instruction *B = BI->first;
5991     StrideDescriptor DesB = BI->second;
5992 
5993     // Initialize a group for B if it has an allowable stride. Even if we don't
5994     // create a group for B, we continue with the bottom-up algorithm to ensure
5995     // we don't break any of B's dependences.
5996     InterleaveGroup *Group = nullptr;
5997     if (isStrided(DesB.Stride)) {
5998       Group = getInterleaveGroup(B);
5999       if (!Group) {
6000         DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
6001         Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
6002       }
6003       if (B->mayWriteToMemory())
6004         StoreGroups.insert(Group);
6005       else
6006         LoadGroups.insert(Group);
6007     }
6008 
6009     for (auto AI = std::next(BI); AI != E; ++AI) {
6010       Instruction *A = AI->first;
6011       StrideDescriptor DesA = AI->second;
6012 
6013       // Our code motion strategy implies that we can't have dependences
6014       // between accesses in an interleaved group and other accesses located
6015       // between the first and last member of the group. Note that this also
6016       // means that a group can't have more than one member at a given offset.
6017       // The accesses in a group can have dependences with other accesses, but
6018       // we must ensure we don't extend the boundaries of the group such that
6019       // we encompass those dependent accesses.
6020       //
6021       // For example, assume we have the sequence of accesses shown below in a
6022       // stride-2 loop:
6023       //
6024       //  (1, 2) is a group | A[i]   = a;  // (1)
6025       //                    | A[i-1] = b;  // (2) |
6026       //                      A[i-3] = c;  // (3)
6027       //                      A[i]   = d;  // (4) | (2, 4) is not a group
6028       //
6029       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
6030       // but not with (4). If we did, the dependent access (3) would be within
6031       // the boundaries of the (2, 4) group.
6032       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
6033 
6034         // If a dependence exists and A is already in a group, we know that A
6035         // must be a store since A precedes B and WAR dependences are allowed.
6036         // Thus, A would be sunk below B. We release A's group to prevent this
6037         // illegal code motion. A will then be free to form another group with
6038         // instructions that precede it.
6039         if (isInterleaved(A)) {
6040           InterleaveGroup *StoreGroup = getInterleaveGroup(A);
6041           StoreGroups.remove(StoreGroup);
6042           releaseGroup(StoreGroup);
6043         }
6044 
6045         // If a dependence exists and A is not already in a group (or it was
6046         // and we just released it), B might be hoisted above A (if B is a
6047         // load) or another store might be sunk below A (if B is a store). In
6048         // either case, we can't add additional instructions to B's group. B
6049         // will only form a group with instructions that it precedes.
6050         break;
6051       }
6052 
6053       // At this point, we've checked for illegal code motion. If either A or B
6054       // isn't strided, there's nothing left to do.
6055       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
6056         continue;
6057 
6058       // Ignore A if it's already in a group or isn't the same kind of memory
6059       // operation as B.
6060       if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
6061         continue;
6062 
6063       // Check rules 1 and 2. Ignore A if its stride or size is different from
6064       // that of B.
6065       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
6066         continue;
6067 
      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
6070       if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
6071         continue;
6072 
6073       // Calculate the distance from A to B.
6074       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
6075           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
6076       if (!DistToB)
6077         continue;
6078       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
6079 
6080       // Check rule 3. Ignore A if its distance to B is not a multiple of the
6081       // size.
6082       if (DistanceToB % static_cast<int64_t>(DesB.Size))
6083         continue;
6084 
6085       // Ignore A if either A or B is in a predicated block. Although we
6086       // currently prevent group formation for predicated accesses, we may be
6087       // able to relax this limitation in the future once we handle more
6088       // complicated blocks.
6089       if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
6090         continue;
6091 
6092       // The index of A is the index of B plus A's distance to B in multiples
6093       // of the size.
6094       int IndexA =
6095           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
6096 
6097       // Try to insert A into B's group.
6098       if (Group->insertMember(A, IndexA, DesA.Align)) {
6099         DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
6100                      << "    into the interleave group with" << *B << '\n');
6101         InterleaveGroupMap[A] = Group;
6102 
6103         // Set the first load in program order as the insert position.
6104         if (A->mayReadFromMemory())
6105           Group->setInsertPos(A);
6106       }
6107     } // Iteration over A accesses.
6108   } // Iteration over B accesses.
6109 
6110   // Remove interleaved store groups with gaps.
6111   for (InterleaveGroup *Group : StoreGroups)
6112     if (Group->getNumMembers() != Group->getFactor())
6113       releaseGroup(Group);
6114 
6115   // Remove interleaved groups with gaps (currently only loads) whose memory
6116   // accesses may wrap around. We have to revisit the getPtrStride analysis,
6117   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
6118   // not check wrapping (see documentation there).
  // For now we use Assume=false;
  // TODO: Change to Assume=true, making sure we don't exceed the threshold
  // of runtime SCEV assumption checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap. Once we change to
  // Assume=true, we'll need at most one runtime check per interleaved group.
6129   //
6130   for (InterleaveGroup *Group : LoadGroups) {
6131 
    // Case 1: A full group. We can skip the checks: for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
6135     if (Group->getNumMembers() == Group->getFactor())
6136       continue;
6137 
    // Case 2: If the first and last members of the group don't wrap, then
    // none of the pointers in the group wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
6143     Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
6144     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
6145                       /*ShouldCheckWrap=*/true)) {
6146       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6147                       "first group member potentially pointer-wrapping.\n");
6148       releaseGroup(Group);
6149       continue;
6150     }
6151     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
6152     if (LastMember) {
6153       Value *LastMemberPtr = getPointerOperand(LastMember);
6154       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
6155                         /*ShouldCheckWrap=*/true)) {
6156         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6157                         "last group member potentially pointer-wrapping.\n");
6158         releaseGroup(Group);
6159       }
6160     } else {
6161       // Case 3: A non-reversed interleaved load group with gaps: We need
6162       // to execute at least one scalar epilogue iteration. This will ensure
6163       // we don't speculatively access memory out-of-bounds. We only need
6164       // to look for a member at index factor - 1, since every group must have
6165       // a member at index zero.
6166       if (Group->isReverse()) {
6167         releaseGroup(Group);
6168         continue;
6169       }
6170       DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
6171       RequiresScalarEpilogue = true;
6172     }
6173   }
6174 }
6175 
6176 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
6177   if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
6178     ORE->emit(createMissedAnalysis("ConditionalStore")
6179               << "store that is conditionally executed prevents vectorization");
6180     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
6181     return None;
6182   }
6183 
6184   if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
6185     return computeFeasibleMaxVF(OptForSize);
6186 
6187   if (Legal->getRuntimePointerChecking()->Need) {
6188     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
6189               << "runtime pointer checks needed. Enable vectorization of this "
6190                  "loop with '#pragma clang loop vectorize(enable)' when "
6191                  "compiling with -Os/-Oz");
6192     DEBUG(dbgs()
6193           << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
6194     return None;
6195   }
6196 
6197   // If we optimize the program for size, avoid creating the tail loop.
6198   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6199   DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
6200 
  // If we don't know the precise trip count (TC == 0), or the loop runs only
  // once, don't try to vectorize.
6202   if (TC < 2) {
6203     ORE->emit(
6204         createMissedAnalysis("UnknownLoopCountComplexCFG")
6205         << "unable to calculate the loop count due to complex control flow");
6206     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6207     return None;
6208   }
6209 
6210   unsigned MaxVF = computeFeasibleMaxVF(OptForSize);
6211 
6212   if (TC % MaxVF != 0) {
6213     // If the trip count that we found modulo the vectorization factor is not
6214     // zero then we require a tail.
6215     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
6216     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
6217     //        smaller MaxVF that does not require a scalar epilog.
6218 
6219     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
6220               << "cannot optimize for size and vectorize at the "
6221                  "same time. Enable vectorization of this loop "
6222                  "with '#pragma clang loop vectorize(enable)' "
6223                  "when compiling with -Os/-Oz");
6224     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6225     return None;
6226   }
6227 
6228   return MaxVF;
6229 }
6230 
6231 unsigned LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize) {
6232   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
6233   unsigned SmallestType, WidestType;
6234   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
6235   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
6236   unsigned MaxSafeDepDist = -1U;
6237 
6238   // Get the maximum safe dependence distance in bits computed by LAA. If the
6239   // loop contains any interleaved accesses, we divide the dependence distance
6240   // by the maximum interleave factor of all interleaved groups. Note that
6241   // although the division ensures correctness, this is a fairly conservative
6242   // computation because the maximum distance computed by LAA may not involve
6243   // any of the interleaved accesses.
6244   if (Legal->getMaxSafeDepDistBytes() != -1U)
6245     MaxSafeDepDist =
6246         Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();
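  // For example (illustrative numbers): a maximum safe dependence distance
  // of 16 bytes with a maximum interleave factor of 2 yields
  // 16 * 8 / 2 = 64 bits, capping the effective register width below.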
6247 
6248   WidestRegister =
6249       ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
6250   unsigned MaxVectorSize = WidestRegister / WidestType;
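  // For example (illustrative numbers), a 256-bit widest register with a
  // widest type of 32 bits allows a MaxVectorSize of 256 / 32 = 8 lanes.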
6251 
6252   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
6253                << WidestType << " bits.\n");
6254   DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
6255                << " bits.\n");
6256 
6257   if (MaxVectorSize == 0) {
6258     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
6259     MaxVectorSize = 1;
6260   }
6261 
6262   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
6263                                 " into one vector!");
6264 
6265   unsigned MaxVF = MaxVectorSize;
6266   if (MaximizeBandwidth && !OptForSize) {
6267     // Collect all viable vectorization factors.
6268     SmallVector<unsigned, 8> VFs;
6269     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
6270     for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
6271       VFs.push_back(VS);
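    // For example (illustrative numbers): with a 256-bit register, an 8-bit
    // smallest type and MaxVectorSize = 8, the candidate VFs are 8, 16, 32.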
6272 
6273     // For each VF calculate its register usage.
6274     auto RUs = calculateRegisterUsage(VFs);
6275 
6276     // Select the largest VF which doesn't require more registers than existing
6277     // ones.
6278     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
6279     for (int i = RUs.size() - 1; i >= 0; --i) {
6280       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
6281         MaxVF = VFs[i];
6282         break;
6283       }
6284     }
6285   }
6286   return MaxVF;
6287 }
6288 
6289 LoopVectorizationCostModel::VectorizationFactor
6290 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
6291   float Cost = expectedCost(1).first;
6292 #ifndef NDEBUG
6293   const float ScalarCost = Cost;
6294 #endif /* NDEBUG */
6295   unsigned Width = 1;
6296   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
6297 
6298   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6299   // Ignore scalar width, because the user explicitly wants vectorization.
6300   if (ForceVectorization && MaxVF > 1) {
6301     Width = 2;
6302     Cost = expectedCost(Width).first / (float)Width;
6303   }
6304 
6305   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
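    // For example (illustrative costs): a vector body costing 8 at VF = 4
    // has a per-lane cost of 8 / 4 = 2, which is compared against the best
    // cost found so far.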
6309     VectorizationCostTy C = expectedCost(i);
6310     float VectorCost = C.first / (float)i;
6311     DEBUG(dbgs() << "LV: Vector loop of width " << i
6312                  << " costs: " << (int)VectorCost << ".\n");
6313     if (!C.second && !ForceVectorization) {
6314       DEBUG(
6315           dbgs() << "LV: Not considering vector loop of width " << i
6316                  << " because it will not generate any vector instructions.\n");
6317       continue;
6318     }
6319     if (VectorCost < Cost) {
6320       Cost = VectorCost;
6321       Width = i;
6322     }
6323   }
6324 
6325   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
6326         << "LV: Vectorization seems to be not beneficial, "
6327         << "but was forced by a user.\n");
6328   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
6329   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
6330   return Factor;
6331 }
6332 
6333 std::pair<unsigned, unsigned>
6334 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6335   unsigned MinWidth = -1U;
6336   unsigned MaxWidth = 8;
6337   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6338 
6339   // For each block.
6340   for (BasicBlock *BB : TheLoop->blocks()) {
6341     // For each instruction in the loop.
6342     for (Instruction &I : *BB) {
6343       Type *T = I.getType();
6344 
6345       // Skip ignored values.
6346       if (ValuesToIgnore.count(&I))
6347         continue;
6348 
6349       // Only examine Loads, Stores and PHINodes.
6350       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6351         continue;
6352 
6353       // Examine PHI nodes that are reduction variables. Update the type to
6354       // account for the recurrence type.
6355       if (auto *PN = dyn_cast<PHINode>(&I)) {
6356         if (!Legal->isReductionVariable(PN))
6357           continue;
6358         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
6359         T = RdxDesc.getRecurrenceType();
6360       }
6361 
6362       // Examine the stored values.
6363       if (auto *ST = dyn_cast<StoreInst>(&I))
6364         T = ST->getValueOperand()->getType();
6365 
6366       // Ignore loaded pointer types and stored pointer types that are not
6367       // vectorizable.
6368       //
6369       // FIXME: The check here attempts to predict whether a load or store will
6370       //        be vectorized. We only know this for certain after a VF has
6371       //        been selected. Here, we assume that if an access can be
6372       //        vectorized, it will be. We should also look at extending this
6373       //        optimization to non-pointer types.
6374       //
6375       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6376           !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
6377         continue;
6378 
6379       MinWidth = std::min(MinWidth,
6380                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6381       MaxWidth = std::max(MaxWidth,
6382                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6383     }
6384   }
6385 
6386   return {MinWidth, MaxWidth};
6387 }
6388 
6389 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
6390                                                            unsigned VF,
6391                                                            unsigned LoopCost) {
6392 
6393   // -- The interleave heuristics --
6394   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6395   // There are many micro-architectural considerations that we can't predict
6396   // at this level. For example, frontend pressure (on decode or fetch) due to
6397   // code size, or the number and capabilities of the execution ports.
6398   //
6399   // We use the following heuristics to select the interleave count:
6400   // 1. If the code has reductions, then we interleave to break the cross
6401   // iteration dependency.
6402   // 2. If the loop is really small, then we interleave to reduce the loop
6403   // overhead.
6404   // 3. We don't interleave if we think that we will spill registers to memory
6405   // due to the increased register pressure.
6406 
6407   // When we optimize for size, we don't interleave.
6408   if (OptForSize)
6409     return 1;
6410 
  // A finite maximum safe dependence distance already limits how much work
  // we may do per iteration, so do not interleave on top of it.
6412   if (Legal->getMaxSafeDepDistBytes() != -1U)
6413     return 1;
6414 
6415   // Do not interleave loops with a relatively small trip count.
6416   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6417   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
6418     return 1;
6419 
6420   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
6421   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6422                << " registers\n");
6423 
6424   if (VF == 1) {
6425     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6426       TargetNumRegisters = ForceTargetNumScalarRegs;
6427   } else {
6428     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6429       TargetNumRegisters = ForceTargetNumVectorRegs;
6430   }
6431 
6432   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so clamp them to at least one; that is,
  // assume there is at least one instruction that uses at least one register.
6435   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
6436   R.NumInstructions = std::max(R.NumInstructions, 1U);
6437 
  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that are
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. The result is rounded down to a power of two,
  // which simplifies any addressing operations and alignment considerations.
6446   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
6447                               R.MaxLocalUsers);
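  // For example (illustrative numbers): with 16 available registers, 2
  // loop-invariant values and 3 registers of peak local usage,
  // IC = PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4) = 4.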
6448 
6449   // Don't count the induction variable as interleaved.
6450   if (EnableIndVarRegisterHeur)
6451     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
6452                        std::max(1U, (R.MaxLocalUsers - 1)));
6453 
6454   // Clamp the interleave ranges to reasonable counts.
6455   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
6456 
6457   // Check if the user has overridden the max.
6458   if (VF == 1) {
6459     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6460       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6461   } else {
6462     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6463       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6464   }
6465 
  // If the cost for VF was not calculated (because the user selected the VF),
  // calculate it here.
6468   if (LoopCost == 0)
6469     LoopCost = expectedCost(VF).first;
6470 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
6473   if (IC > MaxInterleaveCount)
6474     IC = MaxInterleaveCount;
6475   else if (IC < 1)
6476     IC = 1;
6477 
6478   // Interleave if we vectorized this loop and there is a reduction that could
6479   // benefit from interleaving.
6480   if (VF > 1 && Legal->getReductionVars()->size()) {
6481     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6482     return IC;
6483   }
6484 
6485   // Note that if we've already vectorized the loop we will have done the
6486   // runtime check and so interleaving won't require further checks.
6487   bool InterleavingRequiresRuntimePointerCheck =
6488       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
6489 
6490   // We want to interleave small loops in order to reduce the loop overhead and
6491   // potentially expose ILP opportunities.
6492   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6493   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1, and we use the cost model to
    // estimate the cost of the loop body, interleaving until the loop
    // overhead is about 5% of the total cost.
6497     unsigned SmallIC =
6498         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
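    // For example (illustrative numbers): with SmallLoopCost = 20 and an
    // estimated loop cost of 3, SmallIC is capped at
    // PowerOf2Floor(20 / 3) = PowerOf2Floor(6) = 4.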
6499 
6500     // Interleave until store/load ports (estimated by max interleave count) are
6501     // saturated.
6502     unsigned NumStores = Legal->getNumStores();
6503     unsigned NumLoads = Legal->getNumLoads();
6504     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6505     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6506 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit the count, by default
    // to 2, so the critical path only gets increased by one reduction
    // operation.
6511     if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
6512       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6513       SmallIC = std::min(SmallIC, F);
6514       StoresIC = std::min(StoresIC, F);
6515       LoadsIC = std::min(LoadsIC, F);
6516     }
6517 
6518     if (EnableLoadStoreRuntimeInterleave &&
6519         std::max(StoresIC, LoadsIC) > SmallIC) {
6520       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6521       return std::max(StoresIC, LoadsIC);
6522     }
6523 
6524     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6525     return SmallIC;
6526   }
6527 
6528   // Interleave if this is a large loop (small loops are already dealt with by
6529   // this point) that could benefit from interleaving.
6530   bool HasReductions = (Legal->getReductionVars()->size() > 0);
6531   if (TTI.enableAggressiveInterleaving(HasReductions)) {
6532     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6533     return IC;
6534   }
6535 
6536   DEBUG(dbgs() << "LV: Not Interleaving.\n");
6537   return 1;
6538 }
6539 
6540 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6541 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in a topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
  // met before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear scan. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take more registers.
6559   LoopBlocksDFS DFS(TheLoop);
6560   DFS.perform(LI);
6561 
6562   RegisterUsage RU;
6563   RU.NumInstructions = 0;
6564 
6565   // Each 'key' in the map opens a new interval. The values
6566   // of the map are the index of the 'last seen' usage of the
6567   // instruction that is the key.
6568   typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction at that index.
6570   DenseMap<unsigned, Instruction *> IdxToInstr;
6571   // Marks the end of each interval.
6572   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6574   SmallSet<Instruction *, 8> Ends;
  // Saves the list of instructions that are used in the loop but are
  // defined outside the loop. Note that non-instruction values, such as
  // arguments and constants, are not tracked here.
  SmallPtrSet<Value *, 8> LoopInvariants;
6578 
6579   unsigned Index = 0;
6580   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6581     RU.NumInstructions += BB->size();
6582     for (Instruction &I : *BB) {
6583       IdxToInstr[Index++] = &I;
6584 
6585       // Save the end location of each USE.
6586       for (Value *U : I.operands()) {
6587         auto *Instr = dyn_cast<Instruction>(U);
6588 
6589         // Ignore non-instruction values such as arguments, constants, etc.
6590         if (!Instr)
6591           continue;
6592 
6593         // If this instruction is outside the loop then record it and continue.
6594         if (!TheLoop->contains(Instr)) {
6595           LoopInvariants.insert(Instr);
6596           continue;
6597         }
6598 
6599         // Overwrite previous end points.
6600         EndPoint[Instr] = Index;
6601         Ends.insert(Instr);
6602       }
6603     }
6604   }
6605 
6606   // Saves the list of intervals that end with the index in 'key'.
6607   typedef SmallVector<Instruction *, 2> InstrList;
6608   DenseMap<unsigned, InstrList> TransposeEnds;
6609 
6610   // Transpose the EndPoints to a list of values that end at each index.
6611   for (auto &Interval : EndPoint)
6612     TransposeEnds[Interval.second].push_back(Interval.first);
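  // For example, if EndPoint maps {%a -> 5, %b -> 5, %c -> 7}, the transposed
  // map is {5 -> [%a, %b], 7 -> [%c]}: when the linear scan reaches index 5,
  // it can close the intervals of both %a and %b with a single lookup.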
6613 
6614   SmallSet<Instruction *, 8> OpenIntervals;
6615 
6616   // Get the size of the widest register.
6617   unsigned MaxSafeDepDist = -1U;
6618   if (Legal->getMaxSafeDepDistBytes() != -1U)
6619     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
6620   unsigned WidestRegister =
6621       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
6622   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6623 
6624   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6625   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
6626 
6627   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6628 
6629   // A lambda that gets the register usage for the given type and VF.
6630   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
6631     if (Ty->isTokenTy())
6632       return 0U;
6633     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
6634     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
6635   };
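  // For example (illustrative numbers): with a 256-bit widest register, an
  // i32 value at VF = 16 occupies max(1, 16 * 32 / 256) = 2 registers, while
  // the same value at VF = 4 still counts as one.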
6636 
6637   for (unsigned int i = 0; i < Index; ++i) {
6638     Instruction *I = IdxToInstr[i];
6639 
6640     // Remove all of the instructions that end at this location.
6641     InstrList &List = TransposeEnds[i];
6642     for (Instruction *ToRemove : List)
6643       OpenIntervals.erase(ToRemove);
6644 
6645     // Ignore instructions that are never used within the loop.
6646     if (!Ends.count(I))
6647       continue;
6648 
6649     // Skip ignored values.
6650     if (ValuesToIgnore.count(I))
6651       continue;
6652 
6653     // For each VF find the maximum usage of registers.
6654     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6655       if (VFs[j] == 1) {
6656         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
6657         continue;
6658       }
6659       collectUniformsAndScalars(VFs[j]);
6660       // Count the number of live intervals.
6661       unsigned RegUsage = 0;
6662       for (auto Inst : OpenIntervals) {
6663         // Skip ignored values for VF > 1.
6664         if (VecValuesToIgnore.count(Inst) ||
6665             isScalarAfterVectorization(Inst, VFs[j]))
6666           continue;
6667         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
6668       }
6669       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
6670     }
6671 
6672     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6673                  << OpenIntervals.size() << '\n');
6674 
6675     // Add the current instruction to the list of open intervals.
6676     OpenIntervals.insert(I);
6677   }
6678 
6679   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6680     unsigned Invariant = 0;
6681     if (VFs[i] == 1)
6682       Invariant = LoopInvariants.size();
6683     else {
6684       for (auto Inst : LoopInvariants)
6685         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
6686     }
6687 
6688     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
6689     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
6690     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
6691     DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
6692 
6693     RU.LoopInvariantRegs = Invariant;
6694     RU.MaxLocalUsers = MaxUsages[i];
6695     RUs[i] = RU;
6696   }
6697 
6698   return RUs;
6699 }
6700 
6701 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
6702 
6703   // If we aren't vectorizing the loop, or if we've already collected the
6704   // instructions to scalarize, there's nothing to do. Collection may already
6705   // have occurred if we have a user-selected VF and are now computing the
6706   // expected cost for interleaving.
6707   if (VF < 2 || InstsToScalarize.count(VF))
6708     return;
6709 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6711   // not profitable to scalarize any instructions, the presence of VF in the
6712   // map will indicate that we've analyzed it already.
6713   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6714 
6715   // Find all the instructions that are scalar with predication in the loop and
6716   // determine if it would be better to not if-convert the blocks they are in.
6717   // If so, we also record the instructions to scalarize.
6718   for (BasicBlock *BB : TheLoop->blocks()) {
6719     if (!Legal->blockNeedsPredication(BB))
6720       continue;
6721     for (Instruction &I : *BB)
6722       if (Legal->isScalarWithPredication(&I)) {
6723         ScalarCostsTy ScalarCosts;
6724         if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6725           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6726       }
6727   }
6728 }
6729 
6730 int LoopVectorizationCostModel::computePredInstDiscount(
6731     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
6732     unsigned VF) {
6733 
6734   assert(!isUniformAfterVectorization(PredInst, VF) &&
6735          "Instruction marked uniform-after-vectorization will be predicated");
6736 
6737   // Initialize the discount to zero, meaning that the scalar version and the
6738   // vector version cost the same.
6739   int Discount = 0;
6740 
6741   // Holds instructions to analyze. The instructions we visit are mapped in
6742   // ScalarCosts. Those instructions are the ones that would be scalarized if
6743   // we find that the scalar version costs less.
6744   SmallVector<Instruction *, 8> Worklist;
6745 
6746   // Returns true if the given instruction can be scalarized.
6747   auto canBeScalarized = [&](Instruction *I) -> bool {
6748 
6749     // We only attempt to scalarize instructions forming a single-use chain
6750     // from the original predicated block that would otherwise be vectorized.
6751     // Although not strictly necessary, we give up on instructions we know will
6752     // already be scalar to avoid traversing chains that are unlikely to be
6753     // beneficial.
6754     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6755         isScalarAfterVectorization(I, VF))
6756       return false;
6757 
6758     // If the instruction is scalar with predication, it will be analyzed
6759     // separately. We ignore it within the context of PredInst.
6760     if (Legal->isScalarWithPredication(I))
6761       return false;
6762 
6763     // If any of the instruction's operands are uniform after vectorization,
6764     // the instruction cannot be scalarized. This prevents, for example, a
6765     // masked load from being scalarized.
6766     //
6767     // We assume we will only emit a value for lane zero of an instruction
6768     // marked uniform after vectorization, rather than VF identical values.
6769     // Thus, if we scalarize an instruction that uses a uniform, we would
6770     // create uses of values corresponding to the lanes we aren't emitting code
6771     // for. This behavior can be changed by allowing getScalarValue to clone
6772     // the lane zero values for uniforms rather than asserting.
6773     for (Use &U : I->operands())
6774       if (auto *J = dyn_cast<Instruction>(U.get()))
6775         if (isUniformAfterVectorization(J, VF))
6776           return false;
6777 
6778     // Otherwise, we can scalarize the instruction.
6779     return true;
6780   };
6781 
6782   // Returns true if an operand that cannot be scalarized must be extracted
6783   // from a vector. We will account for this scalarization overhead below. Note
6784   // that the non-void predicated instructions are placed in their own blocks,
6785   // and their return values are inserted into vectors. Thus, an extract would
6786   // still be required.
6787   auto needsExtract = [&](Instruction *I) -> bool {
6788     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
6789   };
6790 
6791   // Compute the expected cost discount from scalarizing the entire expression
6792   // feeding the predicated instruction. We currently only consider expressions
6793   // that are single-use instruction chains.
6794   Worklist.push_back(PredInst);
6795   while (!Worklist.empty()) {
6796     Instruction *I = Worklist.pop_back_val();
6797 
6798     // If we've already analyzed the instruction, there's nothing to do.
6799     if (ScalarCosts.count(I))
6800       continue;
6801 
6802     // Compute the cost of the vector instruction. Note that this cost already
6803     // includes the scalarization overhead of the predicated instruction.
6804     unsigned VectorCost = getInstructionCost(I, VF).first;
6805 
6806     // Compute the cost of the scalarized instruction. This cost is the cost of
6807     // the instruction as if it wasn't if-converted and instead remained in the
6808     // predicated block. We will scale this cost by block probability after
6809     // computing the scalarization overhead.
6810     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
6811 
6812     // Compute the scalarization overhead of needed insertelement instructions
6813     // and phi nodes.
6814     if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6815       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
6816                                                  true, false);
6817       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
6818     }
6819 
6820     // Compute the scalarization overhead of needed extractelement
6821     // instructions. For each of the instruction's operands, if the operand can
6822     // be scalarized, add it to the worklist; otherwise, account for the
6823     // overhead.
6824     for (Use &U : I->operands())
6825       if (auto *J = dyn_cast<Instruction>(U.get())) {
6826         assert(VectorType::isValidElementType(J->getType()) &&
6827                "Instruction has non-scalar type");
6828         if (canBeScalarized(J))
6829           Worklist.push_back(J);
6830         else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
6833       }
6834 
6835     // Scale the total scalar cost by block probability.
6836     ScalarCost /= getReciprocalPredBlockProb();
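    // For example, with a reciprocal block probability of 2 (the predicated
    // block is assumed to execute every other iteration), a raw scalar cost
    // of 88 becomes 88 / 2 = 44 (illustrative numbers).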
6837 
6838     // Compute the discount. A non-negative discount means the vector version
6839     // of the instruction costs more, and scalarizing would be beneficial.
6840     Discount += VectorCost - ScalarCost;
6841     ScalarCosts[I] = ScalarCost;
6842   }
6843 
6844   return Discount;
6845 }
6846 
6847 LoopVectorizationCostModel::VectorizationCostTy
6848 LoopVectorizationCostModel::expectedCost(unsigned VF) {
6849   VectorizationCostTy Cost;
6850 
6851   // Collect Uniform and Scalar instructions after vectorization with VF.
6852   collectUniformsAndScalars(VF);
6853 
6854   // Collect the instructions (and their associated costs) that will be more
6855   // profitable to scalarize.
6856   collectInstsToScalarize(VF);
6857 
6858   // For each block.
6859   for (BasicBlock *BB : TheLoop->blocks()) {
6860     VectorizationCostTy BlockCost;
6861 
6862     // For each instruction in the old loop.
6863     for (Instruction &I : *BB) {
6864       // Skip dbg intrinsics.
6865       if (isa<DbgInfoIntrinsic>(I))
6866         continue;
6867 
6868       // Skip ignored values.
6869       if (ValuesToIgnore.count(&I))
6870         continue;
6871 
6872       VectorizationCostTy C = getInstructionCost(&I, VF);
6873 
6874       // Check if we should override the cost.
6875       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6876         C.first = ForceTargetInstructionCost;
6877 
6878       BlockCost.first += C.first;
6879       BlockCost.second |= C.second;
6880       DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
6881                    << VF << " For instruction: " << I << '\n');
6882     }
6883 
6884     // If we are vectorizing a predicated block, it will have been
6885     // if-converted. This means that the block's instructions (aside from
6886     // stores and instructions that may divide by zero) will now be
6887     // unconditionally executed. For the scalar case, we may not always execute
6888     // the predicated block. Thus, scale the block's cost by the probability of
6889     // executing it.
6890     if (VF == 1 && Legal->blockNeedsPredication(BB))
6891       BlockCost.first /= getReciprocalPredBlockProb();
6892 
6893     Cost.first += BlockCost.first;
6894     Cost.second |= BlockCost.second;
6895   }
6896 
6897   return Cost;
6898 }
6899 
/// \brief Gets the address access SCEV after verifying that the access
/// pattern is loop invariant, except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        ScalarEvolution *SE,
                                        const Loop *TheLoop) {
6910   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6911   if (!Gep)
6912     return nullptr;
6913 
6914   // We are looking for a gep with all loop invariant indices except for one
6915   // which should be an induction variable.
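  // For example (illustrative IR):
  //   %gep = getelementptr inbounds [256 x float], [256 x float]* %A,
  //                                 i64 %inv, i64 %iv
  // qualifies when %inv is loop invariant and %iv is an induction variable.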
6916   unsigned NumOperands = Gep->getNumOperands();
6917   for (unsigned i = 1; i < NumOperands; ++i) {
6918     Value *Opd = Gep->getOperand(i);
6919     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6920         !Legal->isInductionVariable(Opd))
6921       return nullptr;
6922   }
6923 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv).
  // Return the SCEV of Ptr.
6925   return SE->getSCEV(Ptr);
6926 }
6927 
6928 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6929   return Legal->hasStride(I->getOperand(0)) ||
6930          Legal->hasStride(I->getOperand(1));
6931 }
6932 
6933 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6934                                                                  unsigned VF) {
6935   Type *ValTy = getMemInstValueType(I);
6936   auto SE = PSE.getSE();
6937 
6938   unsigned Alignment = getMemInstAlignment(I);
6939   unsigned AS = getMemInstAddressSpace(I);
6940   Value *Ptr = getPointerOperand(I);
6941   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6942 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6945   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop);
6946 
6947   // Get the cost of the scalar memory instruction and address computation.
6948   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6949 
6950   Cost += VF *
6951           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6952                               AS);
6953 
6954   // Get the overhead of the extractelement and insertelement instructions
6955   // we might create due to scalarization.
6956   Cost += getScalarizationOverhead(I, VF, TTI);
6957 
6958   // If we have a predicated store, it may not be executed for each vector
6959   // lane. Scale the cost by the probability of executing the predicated
6960   // block.
6961   if (Legal->isScalarWithPredication(I))
6962     Cost /= getReciprocalPredBlockProb();
6963 
6964   return Cost;
6965 }
6966 
6967 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6968                                                              unsigned VF) {
6969   Type *ValTy = getMemInstValueType(I);
6970   Type *VectorTy = ToVectorTy(ValTy, VF);
6971   unsigned Alignment = getMemInstAlignment(I);
6972   Value *Ptr = getPointerOperand(I);
6973   unsigned AS = getMemInstAddressSpace(I);
6974   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6975 
6976   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6977          "Stride should be 1 or -1 for consecutive memory access");
6978   unsigned Cost = 0;
6979   if (Legal->isMaskRequired(I))
6980     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
6981   else
6982     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
6983 
6984   bool Reverse = ConsecutiveStride < 0;
6985   if (Reverse)
6986     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6987   return Cost;
6988 }
6989 
6990 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6991                                                          unsigned VF) {
6992   LoadInst *LI = cast<LoadInst>(I);
6993   Type *ValTy = LI->getType();
6994   Type *VectorTy = ToVectorTy(ValTy, VF);
6995   unsigned Alignment = LI->getAlignment();
6996   unsigned AS = LI->getPointerAddressSpace();
6997 
6998   return TTI.getAddressComputationCost(ValTy) +
6999          TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
7000          TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7001 }
7002 
7003 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7004                                                           unsigned VF) {
7005   Type *ValTy = getMemInstValueType(I);
7006   Type *VectorTy = ToVectorTy(ValTy, VF);
7007   unsigned Alignment = getMemInstAlignment(I);
7008   Value *Ptr = getPointerOperand(I);
7009 
7010   return TTI.getAddressComputationCost(VectorTy) +
7011          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
7012                                     Legal->isMaskRequired(I), Alignment);
7013 }
7014 
7015 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7016                                                             unsigned VF) {
7017   Type *ValTy = getMemInstValueType(I);
7018   Type *VectorTy = ToVectorTy(ValTy, VF);
7019   unsigned AS = getMemInstAddressSpace(I);
7020 
7021   auto Group = Legal->getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
7023 
7024   unsigned InterleaveFactor = Group->getFactor();
7025   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
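  // For example, an interleave factor of 2 at VF = 4 with i32 members gives
  // a wide vector type of <8 x i32> (illustrative numbers).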
7026 
7027   // Holds the indices of existing members in an interleaved load group.
7028   // An interleaved store group doesn't need this as it doesn't allow gaps.
7029   SmallVector<unsigned, 4> Indices;
7030   if (isa<LoadInst>(I)) {
7031     for (unsigned i = 0; i < InterleaveFactor; i++)
7032       if (Group->getMember(i))
7033         Indices.push_back(i);
7034   }
7035 
7036   // Calculate the cost of the whole interleaved group.
7037   unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
7038                                                  Group->getFactor(), Indices,
7039                                                  Group->getAlignment(), AS);
7040 
7041   if (Group->isReverse())
7042     Cost += Group->getNumMembers() *
7043             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
7044   return Cost;
7045 }
7046 
7047 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7048                                                               unsigned VF) {
7049 
  // Calculate the scalar cost only. The vectorization cost should already
  // have been computed at this point.
7052   if (VF == 1) {
7053     Type *ValTy = getMemInstValueType(I);
7054     unsigned Alignment = getMemInstAlignment(I);
    unsigned AS = getMemInstAddressSpace(I);
7056 
7057     return TTI.getAddressComputationCost(ValTy) +
7058            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS);
7059   }
7060   return getWideningCost(I, VF);
7061 }
7062 
7063 LoopVectorizationCostModel::VectorizationCostTy
7064 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
7065   // If we know that this instruction will remain uniform, check the cost of
7066   // the scalar version.
7067   if (isUniformAfterVectorization(I, VF))
7068     VF = 1;
7069 
7070   if (VF > 1 && isProfitableToScalarize(I, VF))
7071     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7072 
7073   Type *VectorTy;
7074   unsigned C = getInstructionCost(I, VF, VectorTy);
7075 
7076   bool TypeNotScalarized =
7077       VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
7078   return VectorizationCostTy(C, TypeNotScalarized);
7079 }
7080 
7081 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
7082   if (VF == 1)
7083     return;
7084   for (BasicBlock *BB : TheLoop->blocks()) {
7085     // For each instruction in the old loop.
7086     for (Instruction &I : *BB) {
7087       Value *Ptr = getPointerOperand(&I);
7088       if (!Ptr)
7089         continue;
7090 
7091       if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
7092         // Scalar load + broadcast
7093         unsigned Cost = getUniformMemOpCost(&I, VF);
7094         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7095         continue;
7096       }
7097 
7098       // We assume that widening is the best solution when possible.
7099       if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
7100         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
7101         setWideningDecision(&I, VF, CM_Widen, Cost);
7102         continue;
7103       }
7104 
7105       // Choose between Interleaving, Gather/Scatter or Scalarization.
7106       unsigned InterleaveCost = UINT_MAX;
7107       unsigned NumAccesses = 1;
7108       if (Legal->isAccessInterleaved(&I)) {
7109         auto Group = Legal->getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7111 
7112         // Make one decision for the whole group.
7113         if (getWideningDecision(&I, VF) != CM_Unknown)
7114           continue;
7115 
7116         NumAccesses = Group->getNumMembers();
7117         InterleaveCost = getInterleaveGroupCost(&I, VF);
7118       }
7119 
7120       unsigned GatherScatterCost =
7121           Legal->isLegalGatherOrScatter(&I)
7122               ? getGatherScatterCost(&I, VF) * NumAccesses
7123               : UINT_MAX;
7124 
7125       unsigned ScalarizationCost =
7126           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7127 
      // Choose the best option for the current VF, record the decision, and
      // use it during vectorization.
7130       unsigned Cost;
7131       InstWidening Decision;
7132       if (InterleaveCost <= GatherScatterCost &&
7133           InterleaveCost < ScalarizationCost) {
7134         Decision = CM_Interleave;
7135         Cost = InterleaveCost;
7136       } else if (GatherScatterCost < ScalarizationCost) {
7137         Decision = CM_GatherScatter;
7138         Cost = GatherScatterCost;
7139       } else {
7140         Decision = CM_Scalarize;
7141         Cost = ScalarizationCost;
7142       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is recorded for the group as a
      // whole, but it will actually be assigned to one instruction.
7146       if (auto Group = Legal->getInterleavedAccessGroup(&I))
7147         setWideningDecision(Group, VF, Decision, Cost);
7148       else
7149         setWideningDecision(&I, VF, Decision, Cost);
7150     }
7151   }
7152 }
7153 
7154 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7155                                                         unsigned VF,
7156                                                         Type *&VectorTy) {
7157   Type *RetTy = I->getType();
7158   if (canTruncateToMinimalBitwidth(I, VF))
7159     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7160   VectorTy = ToVectorTy(RetTy, VF);
7161   auto SE = PSE.getSE();
7162 
7163   // TODO: We need to estimate the cost of intrinsic calls.
7164   switch (I->getOpcode()) {
7165   case Instruction::GetElementPtr:
7166     // We mark this instruction as zero-cost because the cost of GEPs in
7167     // vectorized code depends on whether the corresponding memory instruction
7168     // is scalarized or not. Therefore, we handle GEPs with the memory
7169     // instruction cost.
7170     return 0;
7171   case Instruction::Br: {
7172     return TTI.getCFInstrCost(I->getOpcode());
7173   }
7174   case Instruction::PHI: {
7175     auto *Phi = cast<PHINode>(I);
7176 
7177     // First-order recurrences are replaced by vector shuffles inside the loop.
7178     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
7179       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
7180                                 VectorTy, VF - 1, VectorTy);
7181 
7182     // TODO: IF-converted IFs become selects.
7183     return 0;
7184   }
7185   case Instruction::UDiv:
7186   case Instruction::SDiv:
7187   case Instruction::URem:
7188   case Instruction::SRem:
7189     // If we have a predicated instruction, it may not be executed for each
7190     // vector lane. Get the scalarization cost and scale this amount by the
7191     // probability of executing the predicated block. If the instruction is not
7192     // predicated, we fall through to the next case.
7193     if (VF > 1 && Legal->isScalarWithPredication(I)) {
7194       unsigned Cost = 0;
7195 
7196       // These instructions have a non-void type, so account for the phi nodes
7197       // that we will create. This cost is likely to be zero. The phi node
7198       // cost, if any, should be scaled by the block probability because it
7199       // models a copy at the end of each predicated block.
7200       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
7201 
7202       // The cost of the non-predicated instruction.
7203       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
7204 
7205       // The cost of insertelement and extractelement instructions needed for
7206       // scalarization.
7207       Cost += getScalarizationOverhead(I, VF, TTI);
7208 
7209       // Scale the cost by the probability of executing the predicated blocks.
7210       // This assumes the predicated block for each vector lane is equally
7211       // likely.
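      // For example (illustrative costs): at VF = 4, with a phi cost of 0, a
      // scalar divide cost of 20 and a scalarization overhead of 8, the
      // predicated cost is (4 * 20 + 8) / 2 = 44.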
7212       return Cost / getReciprocalPredBlockProb();
7213     }
7214   case Instruction::Add:
7215   case Instruction::FAdd:
7216   case Instruction::Sub:
7217   case Instruction::FSub:
7218   case Instruction::Mul:
7219   case Instruction::FMul:
7220   case Instruction::FDiv:
7221   case Instruction::FRem:
7222   case Instruction::Shl:
7223   case Instruction::LShr:
7224   case Instruction::AShr:
7225   case Instruction::And:
7226   case Instruction::Or:
7227   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7229     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7230       return 0;
7231     // Certain instructions can be cheaper to vectorize if they have a constant
7232     // second vector operand. One example of this are shifts on x86.
7233     TargetTransformInfo::OperandValueKind Op1VK =
7234         TargetTransformInfo::OK_AnyValue;
7235     TargetTransformInfo::OperandValueKind Op2VK =
7236         TargetTransformInfo::OK_AnyValue;
7237     TargetTransformInfo::OperandValueProperties Op1VP =
7238         TargetTransformInfo::OP_None;
7239     TargetTransformInfo::OperandValueProperties Op2VP =
7240         TargetTransformInfo::OP_None;
7241     Value *Op2 = I->getOperand(1);
7242 
    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7249     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
7250       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
7251       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
7252       if (SplatValue) {
7253         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
7254         if (CInt && CInt->getValue().isPowerOf2())
7255           Op2VP = TargetTransformInfo::OP_PowerOf2;
7256         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7257       }
7258     } else if (Legal->isUniform(Op2)) {
7259       Op2VK = TargetTransformInfo::OK_UniformValue;
7260     }
7261     SmallVector<const Value *, 4> Operands(I->operand_values());
7262     return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
7263                                       Op2VK, Op1VP, Op2VP, Operands);
7264   }
7265   case Instruction::Select: {
7266     SelectInst *SI = cast<SelectInst>(I);
7267     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7268     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7269     Type *CondTy = SI->getCondition()->getType();
7270     if (!ScalarCond)
7271       CondTy = VectorType::get(CondTy, VF);
7272 
7273     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
7274   }
7275   case Instruction::ICmp:
7276   case Instruction::FCmp: {
7277     Type *ValTy = I->getOperand(0)->getType();
7278     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7279     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7280       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7281     VectorTy = ToVectorTy(ValTy, VF);
7282     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
7283   }
7284   case Instruction::Store:
7285   case Instruction::Load: {
7286     VectorTy = ToVectorTy(getMemInstValueType(I), VF);
7287     return getMemoryInstructionCost(I, VF);
7288   }
7289   case Instruction::ZExt:
7290   case Instruction::SExt:
7291   case Instruction::FPToUI:
7292   case Instruction::FPToSI:
7293   case Instruction::FPExt:
7294   case Instruction::PtrToInt:
7295   case Instruction::IntToPtr:
7296   case Instruction::SIToFP:
7297   case Instruction::UIToFP:
7298   case Instruction::Trunc:
7299   case Instruction::FPTrunc:
7300   case Instruction::BitCast: {
7301     // We optimize the truncation of induction variables having constant
7302     // integer steps. The cost of these truncations is the same as the scalar
7303     // operation.
7304     if (isOptimizableIVTruncate(I, VF)) {
7305       auto *Trunc = cast<TruncInst>(I);
7306       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7307                                   Trunc->getSrcTy());
7308     }
7309 
7310     Type *SrcScalarTy = I->getOperand(0)->getType();
7311     Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
7312     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7316       //
7317       // Calculate the modified src and dest types.
7318       Type *MinVecTy = VectorTy;
7319       if (I->getOpcode() == Instruction::Trunc) {
7320         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7321         VectorTy =
7322             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7323       } else if (I->getOpcode() == Instruction::ZExt ||
7324                  I->getOpcode() == Instruction::SExt) {
7325         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7326         VectorTy =
7327             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7328       }
7329     }
7330 
7331     return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
7332   }
7333   case Instruction::Call: {
7334     bool NeedToScalarize;
7335     CallInst *CI = cast<CallInst>(I);
7336     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
7337     if (getVectorIntrinsicIDForCall(CI, TLI))
7338       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
7339     return CallCost;
7340   }
7341   default:
7342     // The cost of executing VF copies of the scalar instruction. This opcode
7343     // is unknown. Assume that it is the same as 'mul'.
7344     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
7345            getScalarizationOverhead(I, VF, TTI);
7346   } // end of switch.
7347 }
7348 
7349 char LoopVectorize::ID = 0;
7350 static const char lv_name[] = "Loop Vectorization";
7351 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7352 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7353 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7354 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7355 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7356 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7357 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7358 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7359 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7360 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7361 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7362 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7363 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7364 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7365 
7366 namespace llvm {
7367 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
7368   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
7369 }
7370 }
7371 
7372 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7373 
7374   // Check if the pointer operand of a load or store instruction is
7375   // consecutive.
7376   if (auto *Ptr = getPointerOperand(Inst))
7377     return Legal->isConsecutivePtr(Ptr);
7378   return false;
7379 }
7380 
7381 void LoopVectorizationCostModel::collectValuesToIgnore() {
7382   // Ignore ephemeral values.
7383   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7384 
7385   // Ignore type-promoting instructions we identified during reduction
7386   // detection.
7387   for (auto &Reduction : *Legal->getReductionVars()) {
7388     RecurrenceDescriptor &RedDes = Reduction.second;
7389     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7390     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7391   }
7392 }
7393 
7394 LoopVectorizationCostModel::VectorizationFactor
7395 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
7396 
  // Width 1 means no vectorization, cost 0 means uncomputed cost.
7398   const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U,
7399                                                                            0U};
7400   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
7401   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
7402     return NoVectorization;
7403 
7404   if (UserVF) {
7405     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7406     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
7407     // Collect the instructions (and their associated costs) that will be more
7408     // profitable to scalarize.
7409     CM.selectUserVectorizationFactor(UserVF);
7410     return {UserVF, 0};
7411   }
7412 
7413   unsigned MaxVF = MaybeMaxVF.getValue();
7414   assert(MaxVF != 0 && "MaxVF is zero.");
7415   if (MaxVF == 1)
7416     return NoVectorization;
7417 
7418   // Select the optimal vectorization factor.
7419   return CM.selectVectorizationFactor(MaxVF);
7420 }
7421 
7422 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
7423   auto *SI = dyn_cast<StoreInst>(Instr);
7424   bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));
7425 
7426   return scalarizeInstruction(Instr, IfPredicateInstr);
7427 }
7428 
7429 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7430 
7431 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7432 
7433 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7434                                         Instruction::BinaryOps BinOp) {
7435   // When unrolling and the VF is 1, we only need to add a simple scalar.
7436   Type *Ty = Val->getType();
7437   assert(!Ty->isVectorTy() && "Val must be a scalar");
7438 
7439   if (Ty->isFloatingPointTy()) {
7440     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7441 
7442     // Floating point operations had to be 'fast' to enable the unrolling.
7443     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7444     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7445   }
7446   Constant *C = ConstantInt::get(Ty, StartIdx);
7447   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7448 }
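// For example, a caller requesting the step for the second unrolled copy
// passes StartIdx = 1, and the code above produces Val + 1 * Step.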
7449 
7450 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7451   SmallVector<Metadata *, 4> MDs;
7452   // Reserve first location for self reference to the LoopID metadata node.
7453   MDs.push_back(nullptr);
7454   bool IsUnrollMetadata = false;
7455   MDNode *LoopID = L->getLoopID();
7456   if (LoopID) {
7457     // First find existing loop unrolling disable metadata.
7458     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7459       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7460       if (MD) {
7461         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7462         IsUnrollMetadata =
7463             S && S->getString().startswith("llvm.loop.unroll.disable");
7464       }
7465       MDs.push_back(LoopID->getOperand(i));
7466     }
7467   }
7468 
7469   if (!IsUnrollMetadata) {
7470     // Add runtime unroll disable metadata.
7471     LLVMContext &Context = L->getHeader()->getContext();
7472     SmallVector<Metadata *, 1> DisableOperands;
7473     DisableOperands.push_back(
7474         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7475     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7476     MDs.push_back(DisableNode);
7477     MDNode *NewLoopID = MDNode::get(Context, MDs);
7478     // Set operand 0 to refer to the loop id itself.
7479     NewLoopID->replaceOperandWith(0, NewLoopID);
7480     L->setLoopID(NewLoopID);
7481   }
7482 }
7483 
7484 bool LoopVectorizePass::processLoop(Loop *L) {
7485   assert(L->empty() && "Only process inner loops.");
7486 
7487 #ifndef NDEBUG
7488   const std::string DebugLocStr = getDebugLocString(L);
7489 #endif /* NDEBUG */
7490 
7491   DEBUG(dbgs() << "\nLV: Checking a loop in \""
7492                << L->getHeader()->getParent()->getName() << "\" from "
7493                << DebugLocStr << "\n");
7494 
7495   LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
7496 
7497   DEBUG(dbgs() << "LV: Loop hints:"
7498                << " force="
7499                << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7500                        ? "disabled"
7501                        : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7502                               ? "enabled"
7503                               : "?"))
7504                << " width=" << Hints.getWidth()
7505                << " unroll=" << Hints.getInterleave() << "\n");
7506 
7507   // Function containing loop
7508   Function *F = L->getHeader()->getParent();
7509 
7510   // Looking at the diagnostic output is the only way to determine if a loop
7511   // was vectorized (other than looking at the IR or machine code), so it
7512   // is important to generate an optimization remark for each loop. Most of
7513   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
7517 
7518   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
7519     DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7520     return false;
7521   }
7522 
7523   // Check the loop for a trip count threshold:
7524   // do not vectorize loops with a tiny trip count.
7525   const unsigned MaxTC = SE->getSmallConstantMaxTripCount(L);
7526   if (MaxTC > 0u && MaxTC < TinyTripCountVectorThreshold) {
7527     DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7528                  << "This loop is not worth vectorizing.");
7529     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7530       DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7531     else {
7532       DEBUG(dbgs() << "\n");
7533       ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
7534                                      "NotBeneficial", L)
7535                 << "vectorization is not beneficial "
7536                    "and is not explicitly forced");
7537       return false;
7538     }
7539   }
7540 
7541   PredicatedScalarEvolution PSE(*SE, *L);
7542 
7543   // Check if it is legal to vectorize the loop.
7544   LoopVectorizationRequirements Requirements(*ORE);
7545   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
7546                                 &Requirements, &Hints);
7547   if (!LVL.canVectorize()) {
7548     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7549     emitMissedWarning(F, L, Hints, ORE);
7550     return false;
7551   }
7552 
7553   // Check the function attributes to find out if this function should be
7554   // optimized for size.
7555   bool OptForSize =
7556       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7557 
7558   // Compute the weighted frequency of this loop being executed and see if it
7559   // is less than 20% of the function entry baseline frequency. Note that we
7560   // always have a canonical loop here because we think we *can* vectorize.
7561   // FIXME: This is hidden behind a flag due to pervasive problems with
7562   // exactly what block frequency models.
7563   if (LoopVectorizeWithBlockFrequency) {
7564     BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
7565     if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
7566         LoopEntryFreq < ColdEntryFreq)
7567       OptForSize = true;
7568   }
7569 
7570   // Check the function attributes to see if implicit floats are allowed.
7571   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7572   // an integer loop and the vector instructions selected are purely integer
7573   // vector instructions?
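  // NoImplicitFloat is typically attached to functions that must not touch
  // FP or vector state behind the programmer's back (e.g. kernel-style code;
  // in clang this can come from a flag like -mno-implicit-float, cited here
  // for illustration).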
7574   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7575     DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
7576                     "attribute is used.\n");
7577     ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
7578                                    "NoImplicitFloat", L)
7579               << "loop not vectorized due to NoImplicitFloat attribute");
7580     emitMissedWarning(F, L, Hints, ORE);
7581     return false;
7582   }
7583 
7584   // Check if the target supports potentially unsafe FP vectorization.
7585   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7586   // for the target we're vectorizing for, to make sure none of the
7587   // additional fp-math flags can help.
7588   if (Hints.isPotentiallyUnsafe() &&
7589       TTI->isFPVectorizationPotentiallyUnsafe()) {
7590     DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
7591     ORE->emit(
7592         createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
7593         << "loop not vectorized due to unsafe FP support.");
7594     emitMissedWarning(F, L, Hints, ORE);
7595     return false;
7596   }
7597 
7598   // Use the cost model.
7599   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
7600                                 &Hints);
7601   CM.collectValuesToIgnore();
7602 
7603   // Use the planner for vectorization.
7604   LoopVectorizationPlanner LVP(CM);
7605 
7606   // Get user vectorization factor.
7607   unsigned UserVF = Hints.getWidth();
7608 
7609   // Plan how to best vectorize, return the best VF and its cost.
7610   LoopVectorizationCostModel::VectorizationFactor VF =
7611       LVP.plan(OptForSize, UserVF);
7612 
7613   // Select the interleave count.
7614   unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
7615 
7616   // Get user interleave count.
7617   unsigned UserIC = Hints.getInterleave();
7618 
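  // Taken together, the flags below combine as follows (illustrative
  // summary; UserIC may still override IC):
  //   !VectorizeLoop && !InterleaveLoop -> emit missed remarks and bail out.
  //   !VectorizeLoop &&  InterleaveLoop -> interleave only (scalar unroller).
  //    VectorizeLoop && !InterleaveLoop -> vectorize only (final IC is 1).
  //    VectorizeLoop &&  InterleaveLoop -> vectorize and interleave.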
7619   // Identify the diagnostic messages that should be produced.
7620   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7621   bool VectorizeLoop = true, InterleaveLoop = true;
7622   if (Requirements.doesNotMeet(F, L, Hints)) {
7623     DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7624                     "requirements.\n");
7625     emitMissedWarning(F, L, Hints, ORE);
7626     return false;
7627   }
7628 
7629   if (VF.Width == 1) {
7630     DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7631     VecDiagMsg = std::make_pair(
7632         "VectorizationNotBeneficial",
7633         "the cost-model indicates that vectorization is not beneficial");
7634     VectorizeLoop = false;
7635   }
7636 
7637   if (IC == 1 && UserIC <= 1) {
7638     // Tell the user interleaving is not beneficial.
7639     DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7640     IntDiagMsg = std::make_pair(
7641         "InterleavingNotBeneficial",
7642         "the cost-model indicates that interleaving is not beneficial");
7643     InterleaveLoop = false;
7644     if (UserIC == 1) {
7645       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7646       IntDiagMsg.second +=
7647           " and is explicitly disabled or interleave count is set to 1";
7648     }
7649   } else if (IC > 1 && UserIC == 1) {
    // Tell the user that interleaving is beneficial, but it is explicitly
    // disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
7653     IntDiagMsg = std::make_pair(
7654         "InterleavingBeneficialButDisabled",
7655         "the cost-model indicates that interleaving is beneficial "
7656         "but is explicitly disabled or interleave count is set to 1");
7657     InterleaveLoop = false;
7658   }
7659 
7660   // Override IC if user provided an interleave count.
7661   IC = UserIC > 0 ? UserIC : IC;
7662 
7663   // Emit diagnostic messages, if any.
7664   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7665   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit(OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                       L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                       L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
7673     return false;
7674   } else if (!VectorizeLoop && InterleaveLoop) {
7675     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7676     ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7677                                          L->getStartLoc(), L->getHeader())
7678               << VecDiagMsg.second);
7679   } else if (VectorizeLoop && !InterleaveLoop) {
7680     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
7681                  << DebugLocStr << '\n');
7682     ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7683                                          L->getStartLoc(), L->getHeader())
7684               << IntDiagMsg.second);
7685   } else if (VectorizeLoop && InterleaveLoop) {
7686     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
7687                  << DebugLocStr << '\n');
7688     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7689   }
7690 
7691   using namespace ore;
7692   if (!VectorizeLoop) {
7693     assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
7696     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7697                                &CM);
7698     Unroller.vectorize();
7699 
7700     ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7701                                  L->getHeader())
7702               << "interleaved loop (interleaved count: "
7703               << NV("InterleaveCount", IC) << ")");
7704   } else {
    // If we decided that it is *profitable* to vectorize the loop, then do it.
7706     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7707                            &LVL, &CM);
7708     LB.vectorize();
7709     ++LoopsVectorized;
7710 
7711     // Add metadata to disable runtime unrolling a scalar loop when there are
7712     // no runtime checks about strides and memory. A scalar loop that is
7713     // rarely used is not worth unrolling.
7714     if (!LB.areSafetyChecksAdded())
7715       AddRuntimeUnrollDisableMetaData(L);
7716 
7717     // Report the vectorization decision.
7718     ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7719                                  L->getHeader())
7720               << "vectorized loop (vectorization width: "
7721               << NV("VectorizationFactor", VF.Width)
7722               << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
7723   }
7724 
7725   // Mark the loop as already vectorized to avoid vectorizing again.
7726   Hints.setAlreadyVectorized();
7727 
7728   DEBUG(verifyFunction(*L->getHeader()->getParent()));
7729   return true;
7730 }
7731 
7732 bool LoopVectorizePass::runImpl(
7733     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7734     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7735     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7736     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7737     OptimizationRemarkEmitter &ORE_) {
7738 
7739   SE = &SE_;
7740   LI = &LI_;
7741   TTI = &TTI_;
7742   DT = &DT_;
7743   BFI = &BFI_;
7744   TLI = TLI_;
7745   AA = &AA_;
7746   AC = &AC_;
7747   GetLAA = &GetLAA_;
7748   DB = &DB_;
7749   ORE = &ORE_;
7750 
7751   // Compute some weights outside of the loop over the loops. Compute this
7752   // using a BranchProbability to re-use its scaling math.
7753   const BranchProbability ColdProb(1, 5); // 20%
7754   ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
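  // E.g. (illustrative numbers) with an entry frequency of 1000,
  // ColdEntryFreq becomes 1000 * 1/5 = 200; under
  // LoopVectorizeWithBlockFrequency, processLoop() then optimizes for size
  // any loop whose preheader frequency falls below it.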
7755 
7756   // Don't attempt if
7757   // 1. the target claims to have no vector registers, and
7758   // 2. interleaving won't help ILP.
7759   //
7760   // The second condition is necessary because, even if the target has no
7761   // vector registers, loop vectorization may still enable scalar
7762   // interleaving.
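  // E.g. (illustrative) a scalar-only target that still reports
  // getMaxInterleaveFactor(1) >= 2 is worth a pass over the loops, since
  // interleaving scalar iterations can expose ILP without SIMD registers.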
7763   if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
7764     return false;
7765 
7766   bool Changed = false;
7767 
7768   // The vectorizer requires loops to be in simplified form.
7769   // Since simplification may add new inner loops, it has to run before the
7770   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
7772   // vectorized.
7773   for (auto &L : *LI)
7774     Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
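  // Simplified form guarantees a preheader, a single backedge, and dedicated
  // exit blocks, which the legality checks and the vector-loop skeleton
  // construction rely on.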
7775 
7776   // Build up a worklist of inner-loops to vectorize. This is necessary as
7777   // the act of vectorizing or partially unrolling a loop creates new loops
7778   // and can invalidate iterators across the loops.
7779   SmallVector<Loop *, 8> Worklist;
7780 
7781   for (Loop *L : *LI)
7782     addAcyclicInnerLoop(*L, Worklist);
7783 
7784   LoopsAnalyzed += Worklist.size();
7785 
7786   // Now walk the identified inner loops.
7787   while (!Worklist.empty()) {
7788     Loop *L = Worklist.pop_back_val();
7789 
7790     // For the inner loops we actually process, form LCSSA to simplify the
7791     // transform.
7792     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
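    // LCSSA PHIs give each value that is live out of the loop a single use
    // point in the exit block, which simplifies updating live-outs after the
    // vector loop and scalar remainder are created.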
7793 
7794     Changed |= processLoop(L);
7795   }
7796 
  return Changed;
}

7803 PreservedAnalyses LoopVectorizePass::run(Function &F,
7804                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
7832 }
7833