1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
11 // and generates target-independent LLVM-IR.
12 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
13 // of instructions in order to estimate the profitability of vectorization.
14 //
15 // The loop vectorizer combines consecutive loop iterations into a single
16 // 'wide' iteration. After this transformation the index is incremented
17 // by the SIMD vector width, and not by one.
18 //
19 // This pass has three parts:
20 // 1. The main loop pass that drives the different parts.
21 // 2. LoopVectorizationLegality - A unit that checks for the legality
22 //    of the vectorization.
23 // 3. InnerLoopVectorizer - A unit that performs the actual
24 //    widening of instructions.
25 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
26 //    of vectorization. It decides on the optimal vector width, which
27 //    can be one, if vectorization is not profitable.
28 //
29 //===----------------------------------------------------------------------===//
30 //
31 // The reduction-variable vectorization is based on the paper:
32 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
33 //
34 // Variable uniformity checks are inspired by:
35 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
36 //
37 // The interleaved access vectorization is based on the paper:
38 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
39 //  Data for SIMD
40 //
41 // Other ideas/concepts are from:
42 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
43 //
44 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
45 //  Vectorizing Compilers.
46 //
47 //===----------------------------------------------------------------------===//
48 
49 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
50 #include "VPlan.h"
51 #include "llvm/ADT/DenseMap.h"
52 #include "llvm/ADT/Hashing.h"
53 #include "llvm/ADT/MapVector.h"
54 #include "llvm/ADT/Optional.h"
55 #include "llvm/ADT/SCCIterator.h"
56 #include "llvm/ADT/SetVector.h"
57 #include "llvm/ADT/SmallPtrSet.h"
58 #include "llvm/ADT/SmallSet.h"
59 #include "llvm/ADT/SmallVector.h"
60 #include "llvm/ADT/Statistic.h"
61 #include "llvm/ADT/StringExtras.h"
62 #include "llvm/Analysis/CodeMetrics.h"
63 #include "llvm/Analysis/GlobalsModRef.h"
64 #include "llvm/Analysis/LoopInfo.h"
65 #include "llvm/Analysis/LoopIterator.h"
66 #include "llvm/Analysis/LoopPass.h"
67 #include "llvm/Analysis/ScalarEvolutionExpander.h"
68 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
69 #include "llvm/Analysis/ValueTracking.h"
70 #include "llvm/Analysis/VectorUtils.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
74 #include "llvm/IR/DerivedTypes.h"
75 #include "llvm/IR/DiagnosticInfo.h"
76 #include "llvm/IR/Dominators.h"
77 #include "llvm/IR/Function.h"
78 #include "llvm/IR/IRBuilder.h"
79 #include "llvm/IR/Instructions.h"
80 #include "llvm/IR/IntrinsicInst.h"
81 #include "llvm/IR/LLVMContext.h"
82 #include "llvm/IR/Module.h"
83 #include "llvm/IR/PatternMatch.h"
84 #include "llvm/IR/Type.h"
85 #include "llvm/IR/User.h"
86 #include "llvm/IR/Value.h"
87 #include "llvm/IR/ValueHandle.h"
88 #include "llvm/IR/Verifier.h"
89 #include "llvm/Pass.h"
90 #include "llvm/Support/BranchProbability.h"
91 #include "llvm/Support/CommandLine.h"
92 #include "llvm/Support/Debug.h"
93 #include "llvm/Support/raw_ostream.h"
94 #include "llvm/Transforms/Scalar.h"
95 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
96 #include "llvm/Transforms/Utils/Local.h"
97 #include "llvm/Transforms/Utils/LoopSimplify.h"
98 #include "llvm/Transforms/Utils/LoopUtils.h"
99 #include "llvm/Transforms/Utils/LoopVersioning.h"
100 #include "llvm/Transforms/Vectorize.h"
101 #include <algorithm>
102 #include <functional>
103 #include <map>
104 #include <tuple>
105 
106 using namespace llvm;
107 using namespace llvm::PatternMatch;
108 
109 #define LV_NAME "loop-vectorize"
110 #define DEBUG_TYPE LV_NAME
111 
112 STATISTIC(LoopsVectorized, "Number of loops vectorized");
113 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
114 
115 static cl::opt<bool>
116     EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
117                        cl::desc("Enable if-conversion during vectorization."));
118 
119 /// Loops with a known constant trip count below this number are vectorized only
120 /// if no scalar iteration overheads are incurred.
121 static cl::opt<unsigned> TinyTripCountVectorThreshold(
122     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
123     cl::desc("Loops with a constant trip count that is smaller than this "
124              "value are vectorized only if no scalar iteration overheads "
125              "are incurred."));
126 
127 static cl::opt<bool> MaximizeBandwidth(
128     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
129     cl::desc("Maximize bandwidth when selecting vectorization factor which "
130              "will be determined by the smallest type in loop."));
131 
132 static cl::opt<bool> EnableInterleavedMemAccesses(
133     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
134     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
135 
136 /// Maximum factor for an interleaved memory access.
137 static cl::opt<unsigned> MaxInterleaveGroupFactor(
138     "max-interleave-group-factor", cl::Hidden,
139     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
140     cl::init(8));
141 
142 /// We don't interleave loops with a known constant trip count below this
143 /// number.
144 static const unsigned TinyTripCountInterleaveThreshold = 128;
145 
146 static cl::opt<unsigned> ForceTargetNumScalarRegs(
147     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
148     cl::desc("A flag that overrides the target's number of scalar registers."));
149 
150 static cl::opt<unsigned> ForceTargetNumVectorRegs(
151     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
152     cl::desc("A flag that overrides the target's number of vector registers."));
153 
154 /// Maximum vectorization interleave count.
155 static const unsigned MaxInterleaveFactor = 16;
156 
157 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
158     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
159     cl::desc("A flag that overrides the target's max interleave factor for "
160              "scalar loops."));
161 
162 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
163     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
164     cl::desc("A flag that overrides the target's max interleave factor for "
165              "vectorized loops."));
166 
167 static cl::opt<unsigned> ForceTargetInstructionCost(
168     "force-target-instruction-cost", cl::init(0), cl::Hidden,
169     cl::desc("A flag that overrides the target's expected cost for "
170              "an instruction to a single constant value. Mostly "
171              "useful for getting consistent testing."));
172 
173 static cl::opt<unsigned> SmallLoopCost(
174     "small-loop-cost", cl::init(20), cl::Hidden,
175     cl::desc(
176         "The cost of a loop that is considered 'small' by the interleaver."));
177 
178 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
179     "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
180     cl::desc("Enable the use of the block frequency analysis to access PGO "
181              "heuristics minimizing code growth in cold regions and being more "
182              "aggressive in hot regions."));
183 
184 // Runtime interleave loops for load/store throughput.
185 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
186     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
187     cl::desc(
188         "Enable runtime interleaving until load/store ports are saturated"));
189 
190 /// The number of stores in a loop that are allowed to need predication.
191 static cl::opt<unsigned> NumberOfStoresToPredicate(
192     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
193     cl::desc("Max number of stores to be predicated behind an if."));
194 
195 static cl::opt<bool> EnableIndVarRegisterHeur(
196     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
197     cl::desc("Count the induction variable only once when interleaving"));
198 
199 static cl::opt<bool> EnableCondStoresVectorization(
200     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
201     cl::desc("Enable if predication of stores during vectorization."));
202 
203 static cl::opt<unsigned> MaxNestedScalarReductionIC(
204     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
205     cl::desc("The maximum interleave count to use when interleaving a scalar "
206              "reduction in a nested loop."));
207 
208 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
209     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
210     cl::desc("The maximum allowed number of runtime memory checks with a "
211              "vectorize(enable) pragma."));
212 
213 static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
214     "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
215     cl::desc("The maximum number of SCEV checks allowed."));
216 
217 static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
218     "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
219     cl::desc("The maximum number of SCEV checks allowed with a "
220              "vectorize(enable) pragma"));
221 
222 /// Create an analysis remark that explains why vectorization failed
223 ///
224 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
225 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
226 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
227 /// the location of the remark.  \return the remark object that can be
228 /// streamed to.
229 static OptimizationRemarkAnalysis
230 createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
231                      Instruction *I = nullptr) {
232   Value *CodeRegion = TheLoop->getHeader();
233   DebugLoc DL = TheLoop->getStartLoc();
234 
235   if (I) {
236     CodeRegion = I->getParent();
237     // If there is no debug location attached to the instruction, revert back to
238     // using the loop's.
239     if (I->getDebugLoc())
240       DL = I->getDebugLoc();
241   }
242 
243   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
244   R << "loop not vectorized: ";
245   return R;
246 }
247 
248 namespace {
249 
250 // Forward declarations.
251 class LoopVectorizeHints;
252 class LoopVectorizationLegality;
253 class LoopVectorizationCostModel;
254 class LoopVectorizationRequirements;
255 class VPInterleaveRecipe;
256 class VPReplicateRecipe;
257 class VPWidenIntOrFpInductionRecipe;
258 class VPWidenRecipe;
259 
260 /// Returns true if the given loop body has a cycle, excluding the loop
261 /// itself.
262 static bool hasCyclesInLoopBody(const Loop &L) {
263   if (!L.empty())
264     return true;
265 
266   for (const auto &SCC :
267        make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
268                   scc_iterator<Loop, LoopBodyTraits>::end(L))) {
269     if (SCC.size() > 1) {
270       DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
271       DEBUG(L.dump());
272       return true;
273     }
274   }
275   return false;
276 }
277 
278 /// A helper function for converting Scalar types to vector types.
279 /// If the incoming type is void, we return void. If the VF is 1, we return
280 /// the scalar type.
281 static Type *ToVectorTy(Type *Scalar, unsigned VF) {
282   if (Scalar->isVoidTy() || VF == 1)
283     return Scalar;
284   return VectorType::get(Scalar, VF);
285 }
286 
287 // FIXME: The following helper functions have multiple implementations
288 // in the project. They can be effectively organized in a common Load/Store
289 // utilities unit.
290 
291 /// A helper function that returns the pointer operand of a load or store
292 /// instruction.
293 static Value *getPointerOperand(Value *I) {
294   if (auto *LI = dyn_cast<LoadInst>(I))
295     return LI->getPointerOperand();
296   if (auto *SI = dyn_cast<StoreInst>(I))
297     return SI->getPointerOperand();
298   return nullptr;
299 }
300 
301 /// A helper function that returns the type of loaded or stored value.
302 static Type *getMemInstValueType(Value *I) {
303   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
304          "Expected Load or Store instruction");
305   if (auto *LI = dyn_cast<LoadInst>(I))
306     return LI->getType();
307   return cast<StoreInst>(I)->getValueOperand()->getType();
308 }
309 
310 /// A helper function that returns the alignment of load or store instruction.
311 static unsigned getMemInstAlignment(Value *I) {
312   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
313          "Expected Load or Store instruction");
314   if (auto *LI = dyn_cast<LoadInst>(I))
315     return LI->getAlignment();
316   return cast<StoreInst>(I)->getAlignment();
317 }
318 
319 /// A helper function that returns the address space of the pointer operand of
320 /// load or store instruction.
321 static unsigned getMemInstAddressSpace(Value *I) {
322   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
323          "Expected Load or Store instruction");
324   if (auto *LI = dyn_cast<LoadInst>(I))
325     return LI->getPointerAddressSpace();
326   return cast<StoreInst>(I)->getPointerAddressSpace();
327 }
328 
329 /// A helper function that returns true if the given type is irregular. The
330 /// type is irregular if its allocated size doesn't equal the store size of an
331 /// element of the corresponding vector type at the given vectorization factor.
332 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
333 
334   // Determine if an array of VF elements of type Ty is "bitcast compatible"
335   // with a <VF x Ty> vector.
336   if (VF > 1) {
337     auto *VectorTy = VectorType::get(Ty, VF);
338     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
339   }
340 
341   // If the vectorization factor is one, we just check if an array of type Ty
342   // requires padding between elements.
343   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
344 }
345 
346 /// A helper function that returns the reciprocal of the block probability of
347 /// predicated blocks. If we return X, we are assuming the predicated block
348 /// will execute once for for every X iterations of the loop header.
349 ///
350 /// TODO: We should use actual block probability here, if available. Currently,
351 ///       we always assume predicated blocks have a 50% chance of executing.
352 static unsigned getReciprocalPredBlockProb() { return 2; }
353 
354 /// A helper function that adds a 'fast' flag to floating-point operations.
355 static Value *addFastMathFlag(Value *V) {
356   if (isa<FPMathOperator>(V)) {
357     FastMathFlags Flags;
358     Flags.setUnsafeAlgebra();
359     cast<Instruction>(V)->setFastMathFlags(Flags);
360   }
361   return V;
362 }
363 
364 /// A helper function that returns an integer or floating-point constant with
365 /// value C.
366 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
367   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
368                            : ConstantFP::get(Ty, C);
369 }
370 
371 } // end anonymous namespace
372 
373 namespace llvm {
374 /// InnerLoopVectorizer vectorizes loops which contain only one basic
375 /// block to a specified vectorization factor (VF).
376 /// This class performs the widening of scalars into vectors, or multiple
377 /// scalars. This class also implements the following features:
378 /// * It inserts an epilogue loop for handling loops that don't have iteration
379 ///   counts that are known to be a multiple of the vectorization factor.
380 /// * It handles the code generation for reduction variables.
381 /// * Scalarization (implementation using scalars) of un-vectorizable
382 ///   instructions.
383 /// InnerLoopVectorizer does not perform any vectorization-legality
384 /// checks, and relies on the caller to check for the different legality
385 /// aspects. The InnerLoopVectorizer relies on the
386 /// LoopVectorizationLegality class to provide information about the induction
387 /// and reduction variables that were found to a given vectorization factor.
388 class InnerLoopVectorizer {
389 public:
390   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
391                       LoopInfo *LI, DominatorTree *DT,
392                       const TargetLibraryInfo *TLI,
393                       const TargetTransformInfo *TTI, AssumptionCache *AC,
394                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
395                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
396                       LoopVectorizationCostModel *CM)
397       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
398         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
399         Builder(PSE.getSE()->getContext()), Induction(nullptr),
400         OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
401         TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
402         AddedSafetyChecks(false) {}
403 
404   /// Create a new empty loop. Unlink the old loop and connect the new one.
405   /// Return the pre-header block of the new loop.
406   BasicBlock *createVectorizedLoopSkeleton();
407 
408   /// Widen a single instruction within the innermost loop.
409   void widenInstruction(Instruction &I);
410 
411   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
412   void fixVectorizedLoop();
413 
414   // Return true if any runtime check is added.
415   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
416 
417   virtual ~InnerLoopVectorizer() {}
418 
419   /// A type for vectorized values in the new loop. Each value from the
420   /// original loop, when vectorized, is represented by UF vector values in the
421   /// new unrolled loop, where UF is the unroll factor.
422   typedef SmallVector<Value *, 2> VectorParts;
423 
424   /// A helper function that computes the predicate of the block BB, assuming
425   /// that the header block of the loop is set to True. It returns the *entry*
426   /// mask for the block BB.
427   VectorParts createBlockInMask(BasicBlock *BB);
428 
429   /// Vectorize a single PHINode in a block. This method handles the induction
430   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
431   /// arbitrary length vectors.
432   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
433 
434   /// A helper function to scalarize a single Instruction in the innermost loop.
435   /// Generates a sequence of scalar instances for each lane between \p MinLane
436   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
437   /// inclusive..
438   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
439                             bool IfPredicateInstr);
440 
441   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
442   /// is provided, the integer induction variable will first be truncated to
443   /// the corresponding type.
444   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
445 
446   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
447   /// vector or scalar value on-demand if one is not yet available. When
448   /// vectorizing a loop, we visit the definition of an instruction before its
449   /// uses. When visiting the definition, we either vectorize or scalarize the
450   /// instruction, creating an entry for it in the corresponding map. (In some
451   /// cases, such as induction variables, we will create both vector and scalar
452   /// entries.) Then, as we encounter uses of the definition, we derive values
453   /// for each scalar or vector use unless such a value is already available.
454   /// For example, if we scalarize a definition and one of its uses is vector,
455   /// we build the required vector on-demand with an insertelement sequence
456   /// when visiting the use. Otherwise, if the use is scalar, we can use the
457   /// existing scalar definition.
458   ///
459   /// Return a value in the new loop corresponding to \p V from the original
460   /// loop at unroll index \p Part. If the value has already been vectorized,
461   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
462   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
463   /// a new vector value on-demand by inserting the scalar values into a vector
464   /// with an insertelement sequence. If the value has been neither vectorized
465   /// nor scalarized, it must be loop invariant, so we simply broadcast the
466   /// value into a vector.
467   Value *getOrCreateVectorValue(Value *V, unsigned Part);
468 
469   /// Return a value in the new loop corresponding to \p V from the original
470   /// loop at unroll and vector indices \p Instance. If the value has been
471   /// vectorized but not scalarized, the necessary extractelement instruction
472   /// will be generated.
473   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
474 
475   /// Construct the vector value of a scalarized value \p V one lane at a time.
476   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
477 
478   /// Try to vectorize the interleaved access group that \p Instr belongs to.
479   void vectorizeInterleaveGroup(Instruction *Instr);
480 
481 protected:
482   /// A small list of PHINodes.
483   typedef SmallVector<PHINode *, 4> PhiVector;
484 
485   /// A type for scalarized values in the new loop. Each value from the
486   /// original loop, when scalarized, is represented by UF x VF scalar values
487   /// in the new unrolled loop, where UF is the unroll factor and VF is the
488   /// vectorization factor.
489   typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;
490 
491   // When we if-convert we need to create edge masks. We have to cache values
492   // so that we don't end up with exponential recursion/IR.
493   typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
494       EdgeMaskCacheTy;
495   typedef DenseMap<BasicBlock *, VectorParts> BlockMaskCacheTy;
496 
497   /// Set up the values of the IVs correctly when exiting the vector loop.
498   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
499                     Value *CountRoundDown, Value *EndValue,
500                     BasicBlock *MiddleBlock);
501 
502   /// Create a new induction variable inside L.
503   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
504                                    Value *Step, Instruction *DL);
505 
506   /// Handle all cross-iteration phis in the header.
507   void fixCrossIterationPHIs();
508 
509   /// Fix a first-order recurrence. This is the second phase of vectorizing
510   /// this phi node.
511   void fixFirstOrderRecurrence(PHINode *Phi);
512 
513   /// Fix a reduction cross-iteration phi. This is the second phase of
514   /// vectorizing this phi node.
515   void fixReduction(PHINode *Phi);
516 
517   /// \brief The Loop exit block may have single value PHI nodes with some
518   /// incoming value. While vectorizing we only handled real values
519   /// that were defined inside the loop and we should have one value for
520   /// each predecessor of its parent basic block. See PR14725.
521   void fixLCSSAPHIs();
522 
523   /// Iteratively sink the scalarized operands of a predicated instruction into
524   /// the block that was created for it.
525   void sinkScalarOperands(Instruction *PredInst);
526 
527   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
528   /// represented as.
529   void truncateToMinimalBitwidths();
530 
531   /// A helper function that computes the predicate of the edge between SRC
532   /// and DST.
533   VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);
534 
535   /// Insert the new loop to the loop hierarchy and pass manager
536   /// and update the analysis passes.
537   void updateAnalysis();
538 
539   /// Vectorize Load and Store instructions,
540   virtual void vectorizeMemoryInstruction(Instruction *Instr);
541 
542   /// Create a broadcast instruction. This method generates a broadcast
543   /// instruction (shuffle) for loop invariant values and for the induction
544   /// value. If this is the induction variable then we extend it to N, N+1, ...
545   /// this is needed because each iteration in the loop corresponds to a SIMD
546   /// element.
547   virtual Value *getBroadcastInstrs(Value *V);
548 
549   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
550   /// to each vector element of Val. The sequence starts at StartIndex.
551   /// \p Opcode is relevant for FP induction variable.
552   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
553                                Instruction::BinaryOps Opcode =
554                                Instruction::BinaryOpsEnd);
555 
556   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
557   /// variable on which to base the steps, \p Step is the size of the step, and
558   /// \p EntryVal is the value from the original loop that maps to the steps.
559   /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
560   /// can be a truncate instruction).
561   void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
562                         const InductionDescriptor &ID);
563 
564   /// Create a vector induction phi node based on an existing scalar one. \p
565   /// EntryVal is the value from the original loop that maps to the vector phi
566   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
567   /// truncate instruction, instead of widening the original IV, we widen a
568   /// version of the IV truncated to \p EntryVal's type.
569   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
570                                        Value *Step, Instruction *EntryVal);
571 
572   /// Returns true if an instruction \p I should be scalarized instead of
573   /// vectorized for the chosen vectorization factor.
574   bool shouldScalarizeInstruction(Instruction *I) const;
575 
576   /// Returns true if we should generate a scalar version of \p IV.
577   bool needsScalarInduction(Instruction *IV) const;
578 
579   /// Generate a shuffle sequence that will reverse the vector Vec.
580   virtual Value *reverseVector(Value *Vec);
581 
582   /// Returns (and creates if needed) the original loop trip count.
583   Value *getOrCreateTripCount(Loop *NewLoop);
584 
585   /// Returns (and creates if needed) the trip count of the widened loop.
586   Value *getOrCreateVectorTripCount(Loop *NewLoop);
587 
588   /// Returns a bitcasted value to the requested vector type.
589   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
590   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
591                                 const DataLayout &DL);
592 
593   /// Emit a bypass check to see if the vector trip count is zero, including if
594   /// it overflows.
595   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
596   /// Emit a bypass check to see if all of the SCEV assumptions we've
597   /// had to make are correct.
598   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
599   /// Emit bypass checks to check any memory assumptions we may have made.
600   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
601 
602   /// Add additional metadata to \p To that was not present on \p Orig.
603   ///
604   /// Currently this is used to add the noalias annotations based on the
605   /// inserted memchecks.  Use this for instructions that are *cloned* into the
606   /// vector loop.
607   void addNewMetadata(Instruction *To, const Instruction *Orig);
608 
609   /// Add metadata from one instruction to another.
610   ///
611   /// This includes both the original MDs from \p From and additional ones (\see
612   /// addNewMetadata).  Use this for *newly created* instructions in the vector
613   /// loop.
614   void addMetadata(Instruction *To, Instruction *From);
615 
616   /// \brief Similar to the previous function but it adds the metadata to a
617   /// vector of instructions.
618   void addMetadata(ArrayRef<Value *> To, Instruction *From);
619 
620   /// \brief Set the debug location in the builder using the debug location in
621   /// the instruction.
622   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
623 
624   /// The original loop.
625   Loop *OrigLoop;
626   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
627   /// dynamic knowledge to simplify SCEV expressions and converts them to a
628   /// more usable form.
629   PredicatedScalarEvolution &PSE;
630   /// Loop Info.
631   LoopInfo *LI;
632   /// Dominator Tree.
633   DominatorTree *DT;
634   /// Alias Analysis.
635   AliasAnalysis *AA;
636   /// Target Library Info.
637   const TargetLibraryInfo *TLI;
638   /// Target Transform Info.
639   const TargetTransformInfo *TTI;
640   /// Assumption Cache.
641   AssumptionCache *AC;
642   /// Interface to emit optimization remarks.
643   OptimizationRemarkEmitter *ORE;
644 
645   /// \brief LoopVersioning.  It's only set up (non-null) if memchecks were
646   /// used.
647   ///
648   /// This is currently only used to add no-alias metadata based on the
649   /// memchecks.  The actually versioning is performed manually.
650   std::unique_ptr<LoopVersioning> LVer;
651 
652   /// The vectorization SIMD factor to use. Each vector will have this many
653   /// vector elements.
654   unsigned VF;
655 
656   /// The vectorization unroll factor to use. Each scalar is vectorized to this
657   /// many different vector instructions.
658   unsigned UF;
659 
660   /// The builder that we use
661   IRBuilder<> Builder;
662 
663   // --- Vectorization state ---
664 
665   /// The vector-loop preheader.
666   BasicBlock *LoopVectorPreHeader;
667   /// The scalar-loop preheader.
668   BasicBlock *LoopScalarPreHeader;
669   /// Middle Block between the vector and the scalar.
670   BasicBlock *LoopMiddleBlock;
671   /// The ExitBlock of the scalar loop.
672   BasicBlock *LoopExitBlock;
673   /// The vector loop body.
674   BasicBlock *LoopVectorBody;
675   /// The scalar loop body.
676   BasicBlock *LoopScalarBody;
677   /// A list of all bypass blocks. The first block is the entry of the loop.
678   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
679 
680   /// The new Induction variable which was added to the new block.
681   PHINode *Induction;
682   /// The induction variable of the old basic block.
683   PHINode *OldInduction;
684 
685   /// Maps values from the original loop to their corresponding values in the
686   /// vectorized loop. A key value can map to either vector values, scalar
687   /// values or both kinds of values, depending on whether the key was
688   /// vectorized and scalarized.
689   VectorizerValueMap VectorLoopValueMap;
690 
691   /// Store instructions that were predicated.
692   SmallVector<Instruction *, 4> PredicatedInstructions;
693   EdgeMaskCacheTy EdgeMaskCache;
694   BlockMaskCacheTy BlockMaskCache;
695   /// Trip count of the original loop.
696   Value *TripCount;
697   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
698   Value *VectorTripCount;
699 
700   /// The legality analysis.
701   LoopVectorizationLegality *Legal;
702 
703   /// The profitablity analysis.
704   LoopVectorizationCostModel *Cost;
705 
706   // Record whether runtime checks are added.
707   bool AddedSafetyChecks;
708 
709   // Holds the end values for each induction variable. We save the end values
710   // so we can later fix-up the external users of the induction variables.
711   DenseMap<PHINode *, Value *> IVEndValues;
712 
713   friend class LoopVectorizationPlanner;
714 };
715 
716 class InnerLoopUnroller : public InnerLoopVectorizer {
717 public:
718   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
719                     LoopInfo *LI, DominatorTree *DT,
720                     const TargetLibraryInfo *TLI,
721                     const TargetTransformInfo *TTI, AssumptionCache *AC,
722                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
723                     LoopVectorizationLegality *LVL,
724                     LoopVectorizationCostModel *CM)
725       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
726                             UnrollFactor, LVL, CM) {}
727 
728 private:
729   Value *getBroadcastInstrs(Value *V) override;
730   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
731                        Instruction::BinaryOps Opcode =
732                        Instruction::BinaryOpsEnd) override;
733   Value *reverseVector(Value *Vec) override;
734 };
735 
736 /// \brief Look for a meaningful debug location on the instruction or it's
737 /// operands.
738 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
739   if (!I)
740     return I;
741 
742   DebugLoc Empty;
743   if (I->getDebugLoc() != Empty)
744     return I;
745 
746   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
747     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
748       if (OpInst->getDebugLoc() != Empty)
749         return OpInst;
750   }
751 
752   return I;
753 }
754 
755 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
756   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
757     const DILocation *DIL = Inst->getDebugLoc();
758     if (DIL && Inst->getFunction()->isDebugInfoForProfiling())
759       B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
760     else
761       B.SetCurrentDebugLocation(DIL);
762   } else
763     B.SetCurrentDebugLocation(DebugLoc());
764 }
765 
766 #ifndef NDEBUG
767 /// \return string containing a file name and a line # for the given loop.
768 static std::string getDebugLocString(const Loop *L) {
769   std::string Result;
770   if (L) {
771     raw_string_ostream OS(Result);
772     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
773       LoopDbgLoc.print(OS);
774     else
775       // Just print the module name.
776       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
777     OS.flush();
778   }
779   return Result;
780 }
781 #endif
782 
783 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
784                                          const Instruction *Orig) {
785   // If the loop was versioned with memchecks, add the corresponding no-alias
786   // metadata.
787   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
788     LVer->annotateInstWithNoAlias(To, Orig);
789 }
790 
791 void InnerLoopVectorizer::addMetadata(Instruction *To,
792                                       Instruction *From) {
793   propagateMetadata(To, From);
794   addNewMetadata(To, From);
795 }
796 
797 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
798                                       Instruction *From) {
799   for (Value *V : To) {
800     if (Instruction *I = dyn_cast<Instruction>(V))
801       addMetadata(I, From);
802   }
803 }
804 
805 } // namespace llvm
806 
807 namespace {
808 
809 /// \brief The group of interleaved loads/stores sharing the same stride and
810 /// close to each other.
811 ///
812 /// Each member in this group has an index starting from 0, and the largest
813 /// index should be less than interleaved factor, which is equal to the absolute
814 /// value of the access's stride.
815 ///
816 /// E.g. An interleaved load group of factor 4:
817 ///        for (unsigned i = 0; i < 1024; i+=4) {
818 ///          a = A[i];                           // Member of index 0
819 ///          b = A[i+1];                         // Member of index 1
820 ///          d = A[i+3];                         // Member of index 3
821 ///          ...
822 ///        }
823 ///
824 ///      An interleaved store group of factor 4:
825 ///        for (unsigned i = 0; i < 1024; i+=4) {
826 ///          ...
827 ///          A[i]   = a;                         // Member of index 0
828 ///          A[i+1] = b;                         // Member of index 1
829 ///          A[i+2] = c;                         // Member of index 2
830 ///          A[i+3] = d;                         // Member of index 3
831 ///        }
832 ///
833 /// Note: the interleaved load group could have gaps (missing members), but
834 /// the interleaved store group doesn't allow gaps.
835 class InterleaveGroup {
836 public:
837   InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
838       : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
839     assert(Align && "The alignment should be non-zero");
840 
841     Factor = std::abs(Stride);
842     assert(Factor > 1 && "Invalid interleave factor");
843 
844     Reverse = Stride < 0;
845     Members[0] = Instr;
846   }
847 
848   bool isReverse() const { return Reverse; }
849   unsigned getFactor() const { return Factor; }
850   unsigned getAlignment() const { return Align; }
851   unsigned getNumMembers() const { return Members.size(); }
852 
853   /// \brief Try to insert a new member \p Instr with index \p Index and
854   /// alignment \p NewAlign. The index is related to the leader and it could be
855   /// negative if it is the new leader.
856   ///
857   /// \returns false if the instruction doesn't belong to the group.
858   bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
859     assert(NewAlign && "The new member's alignment should be non-zero");
860 
861     int Key = Index + SmallestKey;
862 
863     // Skip if there is already a member with the same index.
864     if (Members.count(Key))
865       return false;
866 
867     if (Key > LargestKey) {
868       // The largest index is always less than the interleave factor.
869       if (Index >= static_cast<int>(Factor))
870         return false;
871 
872       LargestKey = Key;
873     } else if (Key < SmallestKey) {
874       // The largest index is always less than the interleave factor.
875       if (LargestKey - Key >= static_cast<int>(Factor))
876         return false;
877 
878       SmallestKey = Key;
879     }
880 
881     // It's always safe to select the minimum alignment.
882     Align = std::min(Align, NewAlign);
883     Members[Key] = Instr;
884     return true;
885   }
886 
887   /// \brief Get the member with the given index \p Index
888   ///
889   /// \returns nullptr if contains no such member.
890   Instruction *getMember(unsigned Index) const {
891     int Key = SmallestKey + Index;
892     if (!Members.count(Key))
893       return nullptr;
894 
895     return Members.find(Key)->second;
896   }
897 
898   /// \brief Get the index for the given member. Unlike the key in the member
899   /// map, the index starts from 0.
900   unsigned getIndex(Instruction *Instr) const {
901     for (auto I : Members)
902       if (I.second == Instr)
903         return I.first - SmallestKey;
904 
905     llvm_unreachable("InterleaveGroup contains no such member");
906   }
907 
908   Instruction *getInsertPos() const { return InsertPos; }
909   void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
910 
911 private:
912   unsigned Factor; // Interleave Factor.
913   bool Reverse;
914   unsigned Align;
915   DenseMap<int, Instruction *> Members;
916   int SmallestKey;
917   int LargestKey;
918 
919   // To avoid breaking dependences, vectorized instructions of an interleave
920   // group should be inserted at either the first load or the last store in
921   // program order.
922   //
923   // E.g. %even = load i32             // Insert Position
924   //      %add = add i32 %even         // Use of %even
925   //      %odd = load i32
926   //
927   //      store i32 %even
928   //      %odd = add i32               // Def of %odd
929   //      store i32 %odd               // Insert Position
930   Instruction *InsertPos;
931 };
932 
933 /// \brief Drive the analysis of interleaved memory accesses in the loop.
934 ///
935 /// Use this class to analyze interleaved accesses only when we can vectorize
936 /// a loop. Otherwise it's meaningless to do analysis as the vectorization
937 /// on interleaved accesses is unsafe.
938 ///
939 /// The analysis collects interleave groups and records the relationships
940 /// between the member and the group in a map.
941 class InterleavedAccessInfo {
942 public:
943   InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
944                         DominatorTree *DT, LoopInfo *LI)
945       : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
946         RequiresScalarEpilogue(false) {}
947 
948   ~InterleavedAccessInfo() {
949     SmallSet<InterleaveGroup *, 4> DelSet;
950     // Avoid releasing a pointer twice.
951     for (auto &I : InterleaveGroupMap)
952       DelSet.insert(I.second);
953     for (auto *Ptr : DelSet)
954       delete Ptr;
955   }
956 
957   /// \brief Analyze the interleaved accesses and collect them in interleave
958   /// groups. Substitute symbolic strides using \p Strides.
959   void analyzeInterleaving(const ValueToValueMap &Strides);
960 
961   /// \brief Check if \p Instr belongs to any interleave group.
962   bool isInterleaved(Instruction *Instr) const {
963     return InterleaveGroupMap.count(Instr);
964   }
965 
966   /// \brief Get the interleave group that \p Instr belongs to.
967   ///
968   /// \returns nullptr if doesn't have such group.
969   InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
970     if (InterleaveGroupMap.count(Instr))
971       return InterleaveGroupMap.find(Instr)->second;
972     return nullptr;
973   }
974 
975   /// \brief Returns true if an interleaved group that may access memory
976   /// out-of-bounds requires a scalar epilogue iteration for correctness.
977   bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
978 
979   /// \brief Initialize the LoopAccessInfo used for dependence checking.
980   void setLAI(const LoopAccessInfo *Info) { LAI = Info; }
981 
982 private:
983   /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
984   /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
985   /// The interleaved access analysis can also add new predicates (for example
986   /// by versioning strides of pointers).
987   PredicatedScalarEvolution &PSE;
988   Loop *TheLoop;
989   DominatorTree *DT;
990   LoopInfo *LI;
991   const LoopAccessInfo *LAI;
992 
993   /// True if the loop may contain non-reversed interleaved groups with
994   /// out-of-bounds accesses. We ensure we don't speculatively access memory
995   /// out-of-bounds by executing at least one scalar epilogue iteration.
996   bool RequiresScalarEpilogue;
997 
998   /// Holds the relationships between the members and the interleave group.
999   DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
1000 
1001   /// Holds dependences among the memory accesses in the loop. It maps a source
1002   /// access to a set of dependent sink accesses.
1003   DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
1004 
1005   /// \brief The descriptor for a strided memory access.
1006   struct StrideDescriptor {
1007     StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
1008                      unsigned Align)
1009         : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
1010 
1011     StrideDescriptor() = default;
1012 
1013     // The access's stride. It is negative for a reverse access.
1014     int64_t Stride = 0;
1015     const SCEV *Scev = nullptr; // The scalar expression of this access
1016     uint64_t Size = 0;          // The size of the memory object.
1017     unsigned Align = 0;         // The alignment of this access.
1018   };
1019 
1020   /// \brief A type for holding instructions and their stride descriptors.
1021   typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;
1022 
1023   /// \brief Create a new interleave group with the given instruction \p Instr,
1024   /// stride \p Stride and alignment \p Align.
1025   ///
1026   /// \returns the newly created interleave group.
1027   InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
1028                                          unsigned Align) {
1029     assert(!InterleaveGroupMap.count(Instr) &&
1030            "Already in an interleaved access group");
1031     InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
1032     return InterleaveGroupMap[Instr];
1033   }
1034 
1035   /// \brief Release the group and remove all the relationships.
1036   void releaseGroup(InterleaveGroup *Group) {
1037     for (unsigned i = 0; i < Group->getFactor(); i++)
1038       if (Instruction *Member = Group->getMember(i))
1039         InterleaveGroupMap.erase(Member);
1040 
1041     delete Group;
1042   }
1043 
1044   /// \brief Collect all the accesses with a constant stride in program order.
1045   void collectConstStrideAccesses(
1046       MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1047       const ValueToValueMap &Strides);
1048 
1049   /// \brief Returns true if \p Stride is allowed in an interleaved group.
1050   static bool isStrided(int Stride) {
1051     unsigned Factor = std::abs(Stride);
1052     return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1053   }
1054 
1055   /// \brief Returns true if \p BB is a predicated block.
1056   bool isPredicated(BasicBlock *BB) const {
1057     return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
1058   }
1059 
1060   /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
1061   bool areDependencesValid() const {
1062     return LAI && LAI->getDepChecker().getDependences();
1063   }
1064 
1065   /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
1066   /// necessary, when constructing interleaved groups.
1067   ///
1068   /// \p A must precede \p B in program order. We return false if reordering is
1069   /// not necessary or is prevented because \p A and \p B may be dependent.
1070   bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
1071                                                  StrideEntry *B) const {
1072 
1073     // Code motion for interleaved accesses can potentially hoist strided loads
1074     // and sink strided stores. The code below checks the legality of the
1075     // following two conditions:
1076     //
1077     // 1. Potentially moving a strided load (B) before any store (A) that
1078     //    precedes B, or
1079     //
1080     // 2. Potentially moving a strided store (A) after any load or store (B)
1081     //    that A precedes.
1082     //
1083     // It's legal to reorder A and B if we know there isn't a dependence from A
1084     // to B. Note that this determination is conservative since some
1085     // dependences could potentially be reordered safely.
1086 
1087     // A is potentially the source of a dependence.
1088     auto *Src = A->first;
1089     auto SrcDes = A->second;
1090 
1091     // B is potentially the sink of a dependence.
1092     auto *Sink = B->first;
1093     auto SinkDes = B->second;
1094 
1095     // Code motion for interleaved accesses can't violate WAR dependences.
1096     // Thus, reordering is legal if the source isn't a write.
1097     if (!Src->mayWriteToMemory())
1098       return true;
1099 
1100     // At least one of the accesses must be strided.
1101     if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
1102       return true;
1103 
1104     // If dependence information is not available from LoopAccessInfo,
1105     // conservatively assume the instructions can't be reordered.
1106     if (!areDependencesValid())
1107       return false;
1108 
1109     // If we know there is a dependence from source to sink, assume the
1110     // instructions can't be reordered. Otherwise, reordering is legal.
1111     return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
1112   }
1113 
1114   /// \brief Collect the dependences from LoopAccessInfo.
1115   ///
1116   /// We process the dependences once during the interleaved access analysis to
1117   /// enable constant-time dependence queries.
1118   void collectDependences() {
1119     if (!areDependencesValid())
1120       return;
1121     auto *Deps = LAI->getDepChecker().getDependences();
1122     for (auto Dep : *Deps)
1123       Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
1124   }
1125 };
1126 
1127 /// Utility class for getting and setting loop vectorizer hints in the form
1128 /// of loop metadata.
1129 /// This class keeps a number of loop annotations locally (as member variables)
1130 /// and can, upon request, write them back as metadata on the loop. It will
1131 /// initially scan the loop for existing metadata, and will update the local
1132 /// values based on information in the loop.
1133 /// We cannot write all values to metadata, as the mere presence of some info,
1134 /// for example 'force', means a decision has been made. So, we need to be
1135 /// careful NOT to add them if the user hasn't specifically asked so.
1136 class LoopVectorizeHints {
1137   enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };
1138 
1139   /// Hint - associates name and validation with the hint value.
1140   struct Hint {
1141     const char *Name;
1142     unsigned Value; // This may have to change for non-numeric values.
1143     HintKind Kind;
1144 
1145     Hint(const char *Name, unsigned Value, HintKind Kind)
1146         : Name(Name), Value(Value), Kind(Kind) {}
1147 
1148     bool validate(unsigned Val) {
1149       switch (Kind) {
1150       case HK_WIDTH:
1151         return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
1152       case HK_UNROLL:
1153         return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
1154       case HK_FORCE:
1155         return (Val <= 1);
1156       case HK_ISVECTORIZED:
1157         return (Val==0 || Val==1);
1158       }
1159       return false;
1160     }
1161   };
1162 
1163   /// Vectorization width.
1164   Hint Width;
1165   /// Vectorization interleave factor.
1166   Hint Interleave;
1167   /// Vectorization forced
1168   Hint Force;
1169 
1170   /// Already Vectorized
1171   Hint IsVectorized;
1172   /// Return the loop metadata prefix.
1173   static StringRef Prefix() { return "llvm.loop."; }
1174 
1175   /// True if there is any unsafe math in the loop.
1176   bool PotentiallyUnsafe;
1177 
1178 public:
1179   enum ForceKind {
1180     FK_Undefined = -1, ///< Not selected.
1181     FK_Disabled = 0,   ///< Forcing disabled.
1182     FK_Enabled = 1,    ///< Forcing enabled.
1183   };
1184 
1185   LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
1186                      OptimizationRemarkEmitter &ORE)
1187       : Width("vectorize.width", VectorizerParams::VectorizationFactor,
1188               HK_WIDTH),
1189         Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
1190         Force("vectorize.enable", FK_Undefined, HK_FORCE),
1191         IsVectorized("isvectorized", 0, HK_ISVECTORIZED),
1192         PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
1193     // Populate values with existing loop metadata.
1194     getHintsFromMetadata();
1195 
1196     // force-vector-interleave overrides DisableInterleaving.
1197     if (VectorizerParams::isInterleaveForced())
1198       Interleave.Value = VectorizerParams::VectorizationInterleave;
1199 
1200     if (IsVectorized.Value != 1)
1201       // If the vectorization width and interleaving count are both 1 then
1202       // consider the loop to have been already vectorized because there's
1203       // nothing more that we can do.
1204       IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
1205     DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
1206           << "LV: Interleaving disabled by the pass manager\n");
1207   }
1208 
1209   /// Mark the loop L as already vectorized by setting the width to 1.
1210   void setAlreadyVectorized() {
1211     IsVectorized.Value = 1;
1212     Hint Hints[] = {IsVectorized};
1213     writeHintsToMetadata(Hints);
1214   }
1215 
1216   bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
1217     if (getForce() == LoopVectorizeHints::FK_Disabled) {
1218       DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
1219       emitRemarkWithHints();
1220       return false;
1221     }
1222 
1223     if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
1224       DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
1225       emitRemarkWithHints();
1226       return false;
1227     }
1228 
1229     if (getIsVectorized() == 1) {
1230       DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
1231       // FIXME: Add interleave.disable metadata. This will allow
1232       // vectorize.disable to be used without disabling the pass and errors
1233       // to differentiate between disabled vectorization and a width of 1.
1234       ORE.emit([&]() {
1235         return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
1236                                           "AllDisabled", L->getStartLoc(),
1237                                           L->getHeader())
1238                << "loop not vectorized: vectorization and interleaving are "
1239                   "explicitly disabled, or the loop has already been "
1240                   "vectorized";
1241       });
1242       return false;
1243     }
1244 
1245     return true;
1246   }
1247 
1248   /// Dumps all the hint information.
1249   void emitRemarkWithHints() const {
1250     using namespace ore;
1251     if (Force.Value == LoopVectorizeHints::FK_Disabled)
1252       ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
1253                                         TheLoop->getStartLoc(),
1254                                         TheLoop->getHeader())
1255                << "loop not vectorized: vectorization is explicitly disabled");
1256     else {
1257       OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
1258                                  TheLoop->getStartLoc(), TheLoop->getHeader());
1259       R << "loop not vectorized";
1260       if (Force.Value == LoopVectorizeHints::FK_Enabled) {
1261         R << " (Force=" << NV("Force", true);
1262         if (Width.Value != 0)
1263           R << ", Vector Width=" << NV("VectorWidth", Width.Value);
1264         if (Interleave.Value != 0)
1265           R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
1266         R << ")";
1267       }
1268       ORE.emit(R);
1269     }
1270   }
1271 
1272   unsigned getWidth() const { return Width.Value; }
1273   unsigned getInterleave() const { return Interleave.Value; }
1274   unsigned getIsVectorized() const { return IsVectorized.Value; }
1275   enum ForceKind getForce() const { return (ForceKind)Force.Value; }
1276 
1277   /// \brief If hints are provided that force vectorization, use the AlwaysPrint
1278   /// pass name to force the frontend to print the diagnostic.
1279   const char *vectorizeAnalysisPassName() const {
1280     if (getWidth() == 1)
1281       return LV_NAME;
1282     if (getForce() == LoopVectorizeHints::FK_Disabled)
1283       return LV_NAME;
1284     if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
1285       return LV_NAME;
1286     return OptimizationRemarkAnalysis::AlwaysPrint;
1287   }
1288 
1289   bool allowReordering() const {
1290     // When enabling loop hints are provided we allow the vectorizer to change
1291     // the order of operations that is given by the scalar loop. This is not
1292     // enabled by default because can be unsafe or inefficient. For example,
1293     // reordering floating-point operations will change the way round-off
1294     // error accumulates in the loop.
1295     return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
1296   }
1297 
1298   bool isPotentiallyUnsafe() const {
1299     // Avoid FP vectorization if the target is unsure about proper support.
1300     // This may be related to the SIMD unit in the target not handling
1301     // IEEE 754 FP ops properly, or bad single-to-double promotions.
1302     // Otherwise, a sequence of vectorized loops, even without reduction,
1303     // could lead to different end results on the destination vectors.
1304     return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
1305   }
1306 
1307   void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
1308 
1309 private:
1310   /// Find hints specified in the loop metadata and update local values.
1311   void getHintsFromMetadata() {
1312     MDNode *LoopID = TheLoop->getLoopID();
1313     if (!LoopID)
1314       return;
1315 
1316     // First operand should refer to the loop id itself.
1317     assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
1318     assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
1319 
1320     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1321       const MDString *S = nullptr;
1322       SmallVector<Metadata *, 4> Args;
1323 
1324       // The expected hint is either a MDString or a MDNode with the first
1325       // operand a MDString.
1326       if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
1327         if (!MD || MD->getNumOperands() == 0)
1328           continue;
1329         S = dyn_cast<MDString>(MD->getOperand(0));
1330         for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1331           Args.push_back(MD->getOperand(i));
1332       } else {
1333         S = dyn_cast<MDString>(LoopID->getOperand(i));
1334         assert(Args.size() == 0 && "too many arguments for MDString");
1335       }
1336 
1337       if (!S)
1338         continue;
1339 
1340       // Check if the hint starts with the loop metadata prefix.
1341       StringRef Name = S->getString();
1342       if (Args.size() == 1)
1343         setHint(Name, Args[0]);
1344     }
1345   }
1346 
1347   /// Checks string hint with one operand and set value if valid.
1348   void setHint(StringRef Name, Metadata *Arg) {
1349     if (!Name.startswith(Prefix()))
1350       return;
1351     Name = Name.substr(Prefix().size(), StringRef::npos);
1352 
1353     const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1354     if (!C)
1355       return;
1356     unsigned Val = C->getZExtValue();
1357 
1358     Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
1359     for (auto H : Hints) {
1360       if (Name == H->Name) {
1361         if (H->validate(Val))
1362           H->Value = Val;
1363         else
1364           DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1365         break;
1366       }
1367     }
1368   }
1369 
1370   /// Create a new hint from name / value pair.
1371   MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1372     LLVMContext &Context = TheLoop->getHeader()->getContext();
1373     Metadata *MDs[] = {MDString::get(Context, Name),
1374                        ConstantAsMetadata::get(
1375                            ConstantInt::get(Type::getInt32Ty(Context), V))};
1376     return MDNode::get(Context, MDs);
1377   }
1378 
1379   /// Matches metadata with hint name.
1380   bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1381     MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1382     if (!Name)
1383       return false;
1384 
1385     for (auto H : HintTypes)
1386       if (Name->getString().endswith(H.Name))
1387         return true;
1388     return false;
1389   }
1390 
1391   /// Sets current hints into loop metadata, keeping other values intact.
1392   void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1393     if (HintTypes.size() == 0)
1394       return;
1395 
1396     // Reserve the first element to LoopID (see below).
1397     SmallVector<Metadata *, 4> MDs(1);
1398     // If the loop already has metadata, then ignore the existing operands.
1399     MDNode *LoopID = TheLoop->getLoopID();
1400     if (LoopID) {
1401       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1402         MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
1403         // If node in update list, ignore old value.
1404         if (!matchesHintMetadataName(Node, HintTypes))
1405           MDs.push_back(Node);
1406       }
1407     }
1408 
1409     // Now, add the missing hints.
1410     for (auto H : HintTypes)
1411       MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1412 
1413     // Replace current metadata node with new one.
1414     LLVMContext &Context = TheLoop->getHeader()->getContext();
1415     MDNode *NewLoopID = MDNode::get(Context, MDs);
1416     // Set operand 0 to refer to the loop id itself.
1417     NewLoopID->replaceOperandWith(0, NewLoopID);
1418 
1419     TheLoop->setLoopID(NewLoopID);
1420   }
1421 
1422   /// The loop these hints belong to.
1423   const Loop *TheLoop;
1424 
1425   /// Interface to emit optimization remarks.
1426   OptimizationRemarkEmitter &ORE;
1427 };
1428 
1429 static void emitMissedWarning(Function *F, Loop *L,
1430                               const LoopVectorizeHints &LH,
1431                               OptimizationRemarkEmitter *ORE) {
1432   LH.emitRemarkWithHints();
1433 
1434   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1435     if (LH.getWidth() != 1)
1436       ORE->emit(DiagnosticInfoOptimizationFailure(
1437                     DEBUG_TYPE, "FailedRequestedVectorization",
1438                     L->getStartLoc(), L->getHeader())
1439                 << "loop not vectorized: "
1440                 << "failed explicitly specified loop vectorization");
1441     else if (LH.getInterleave() != 1)
1442       ORE->emit(DiagnosticInfoOptimizationFailure(
1443                     DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
1444                     L->getHeader())
1445                 << "loop not interleaved: "
1446                 << "failed explicitly specified loop interleaving");
1447   }
1448 }
1449 
1450 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1451 /// to what vectorization factor.
1452 /// This class does not look at the profitability of vectorization, only the
1453 /// legality. This class has two main kinds of checks:
1454 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1455 ///   will change the order of memory accesses in a way that will change the
1456 ///   correctness of the program.
1457 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
1458 /// checks for a number of different conditions, such as the availability of a
1459 /// single induction variable, that all types are supported and vectorize-able,
1460 /// etc. This code reflects the capabilities of InnerLoopVectorizer.
1461 /// This class is also used by InnerLoopVectorizer for identifying
1462 /// induction variable and the different reduction variables.
1463 class LoopVectorizationLegality {
1464 public:
1465   LoopVectorizationLegality(
1466       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1467       TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1468       const TargetTransformInfo *TTI,
1469       std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1470       OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1471       LoopVectorizeHints *H)
1472       : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1473         GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1474         PrimaryInduction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1475         Requirements(R), Hints(H) {}
1476 
1477   /// ReductionList contains the reduction descriptors for all
1478   /// of the reductions that were found in the loop.
1479   typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1480 
1481   /// InductionList saves induction variables and maps them to the
1482   /// induction descriptor.
1483   typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1484 
1485   /// RecurrenceSet contains the phi nodes that are recurrences other than
1486   /// inductions and reductions.
1487   typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1488 
1489   /// Returns true if it is legal to vectorize this loop.
1490   /// This does not mean that it is profitable to vectorize this
1491   /// loop, only that it is legal to do so.
1492   bool canVectorize();
1493 
1494   /// Returns the primary induction variable.
1495   PHINode *getPrimaryInduction() { return PrimaryInduction; }
1496 
1497   /// Returns the reduction variables found in the loop.
1498   ReductionList *getReductionVars() { return &Reductions; }
1499 
1500   /// Returns the induction variables found in the loop.
1501   InductionList *getInductionVars() { return &Inductions; }
1502 
1503   /// Return the first-order recurrences found in the loop.
1504   RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1505 
1506   /// Return the set of instructions to sink to handle first-order recurrences.
1507   DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
1508 
1509   /// Returns the widest induction type.
1510   Type *getWidestInductionType() { return WidestIndTy; }
1511 
1512   /// Returns True if V is an induction variable in this loop.
1513   bool isInductionVariable(const Value *V);
1514 
1515   /// Returns True if PN is a reduction variable in this loop.
1516   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1517 
1518   /// Returns True if Phi is a first-order recurrence in this loop.
1519   bool isFirstOrderRecurrence(const PHINode *Phi);
1520 
1521   /// Return true if the block BB needs to be predicated in order for the loop
1522   /// to be vectorized.
1523   bool blockNeedsPredication(BasicBlock *BB);
1524 
1525   /// Check if this pointer is consecutive when vectorizing. This happens
1526   /// when the last index of the GEP is the induction variable, or that the
1527   /// pointer itself is an induction variable.
1528   /// This check allows us to vectorize A[idx] into a wide load/store.
1529   /// Returns:
1530   /// 0 - Stride is unknown or non-consecutive.
1531   /// 1 - Address is consecutive.
1532   /// -1 - Address is consecutive, and decreasing.
1533   int isConsecutivePtr(Value *Ptr);
1534 
1535   /// Returns true if the value V is uniform within the loop.
1536   bool isUniform(Value *V);
1537 
1538   /// Returns the information that we collected about runtime memory check.
1539   const RuntimePointerChecking *getRuntimePointerChecking() const {
1540     return LAI->getRuntimePointerChecking();
1541   }
1542 
1543   const LoopAccessInfo *getLAI() const { return LAI; }
1544 
1545   /// \brief Check if \p Instr belongs to any interleaved access group.
1546   bool isAccessInterleaved(Instruction *Instr) {
1547     return InterleaveInfo.isInterleaved(Instr);
1548   }
1549 
1550   /// \brief Get the interleaved access group that \p Instr belongs to.
1551   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1552     return InterleaveInfo.getInterleaveGroup(Instr);
1553   }
1554 
1555   /// \brief Returns true if an interleaved group requires a scalar iteration
1556   /// to handle accesses with gaps.
1557   bool requiresScalarEpilogue() const {
1558     return InterleaveInfo.requiresScalarEpilogue();
1559   }
1560 
1561   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1562 
1563   uint64_t getMaxSafeRegisterWidth() const {
1564 	  return LAI->getDepChecker().getMaxSafeRegisterWidth();
1565   }
1566 
1567   bool hasStride(Value *V) { return LAI->hasStride(V); }
1568 
1569   /// Returns true if the target machine supports masked store operation
1570   /// for the given \p DataType and kind of access to \p Ptr.
1571   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1572     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1573   }
1574   /// Returns true if the target machine supports masked load operation
1575   /// for the given \p DataType and kind of access to \p Ptr.
1576   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1577     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1578   }
1579   /// Returns true if the target machine supports masked scatter operation
1580   /// for the given \p DataType.
1581   bool isLegalMaskedScatter(Type *DataType) {
1582     return TTI->isLegalMaskedScatter(DataType);
1583   }
1584   /// Returns true if the target machine supports masked gather operation
1585   /// for the given \p DataType.
1586   bool isLegalMaskedGather(Type *DataType) {
1587     return TTI->isLegalMaskedGather(DataType);
1588   }
1589   /// Returns true if the target machine can represent \p V as a masked gather
1590   /// or scatter operation.
1591   bool isLegalGatherOrScatter(Value *V) {
1592     auto *LI = dyn_cast<LoadInst>(V);
1593     auto *SI = dyn_cast<StoreInst>(V);
1594     if (!LI && !SI)
1595       return false;
1596     auto *Ptr = getPointerOperand(V);
1597     auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1598     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1599   }
1600 
1601   /// Returns true if vector representation of the instruction \p I
1602   /// requires mask.
1603   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1604   unsigned getNumStores() const { return LAI->getNumStores(); }
1605   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1606   unsigned getNumPredStores() const { return NumPredStores; }
1607 
1608   /// Returns true if \p I is an instruction that will be scalarized with
1609   /// predication. Such instructions include conditional stores and
1610   /// instructions that may divide by zero.
1611   bool isScalarWithPredication(Instruction *I);
1612 
1613   /// Returns true if \p I is a memory instruction with consecutive memory
1614   /// access that can be widened.
1615   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1616 
1617   // Returns true if the NoNaN attribute is set on the function.
1618   bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }
1619 
1620 private:
1621   /// Check if a single basic block loop is vectorizable.
1622   /// At this point we know that this is a loop with a constant trip count
1623   /// and we only need to check individual instructions.
1624   bool canVectorizeInstrs();
1625 
1626   /// When we vectorize loops we may change the order in which
1627   /// we read and write from memory. This method checks if it is
1628   /// legal to vectorize the code, considering only memory constrains.
1629   /// Returns true if the loop is vectorizable
1630   bool canVectorizeMemory();
1631 
1632   /// Return true if we can vectorize this loop using the IF-conversion
1633   /// transformation.
1634   bool canVectorizeWithIfConvert();
1635 
1636   /// Return true if all of the instructions in the block can be speculatively
1637   /// executed. \p SafePtrs is a list of addresses that are known to be legal
1638   /// and we know that we can read from them without segfault.
1639   bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1640 
1641   /// Updates the vectorization state by adding \p Phi to the inductions list.
1642   /// This can set \p Phi as the main induction of the loop if \p Phi is a
1643   /// better choice for the main induction than the existing one.
1644   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1645                        SmallPtrSetImpl<Value *> &AllowedExit);
1646 
1647   /// Create an analysis remark that explains why vectorization failed
1648   ///
1649   /// \p RemarkName is the identifier for the remark.  If \p I is passed it is
1650   /// an instruction that prevents vectorization.  Otherwise the loop is used
1651   /// for the location of the remark.  \return the remark object that can be
1652   /// streamed to.
1653   OptimizationRemarkAnalysis
1654   createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1655     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1656                                   RemarkName, TheLoop, I);
1657   }
1658 
1659   /// \brief If an access has a symbolic strides, this maps the pointer value to
1660   /// the stride symbol.
1661   const ValueToValueMap *getSymbolicStrides() {
1662     // FIXME: Currently, the set of symbolic strides is sometimes queried before
1663     // it's collected.  This happens from canVectorizeWithIfConvert, when the
1664     // pointer is checked to reference consecutive elements suitable for a
1665     // masked access.
1666     return LAI ? &LAI->getSymbolicStrides() : nullptr;
1667   }
1668 
1669   unsigned NumPredStores;
1670 
1671   /// The loop that we evaluate.
1672   Loop *TheLoop;
1673   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1674   /// Applies dynamic knowledge to simplify SCEV expressions in the context
1675   /// of existing SCEV assumptions. The analysis will also add a minimal set
1676   /// of new predicates if this is required to enable vectorization and
1677   /// unrolling.
1678   PredicatedScalarEvolution &PSE;
1679   /// Target Library Info.
1680   TargetLibraryInfo *TLI;
1681   /// Target Transform Info
1682   const TargetTransformInfo *TTI;
1683   /// Dominator Tree.
1684   DominatorTree *DT;
1685   // LoopAccess analysis.
1686   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1687   // And the loop-accesses info corresponding to this loop.  This pointer is
1688   // null until canVectorizeMemory sets it up.
1689   const LoopAccessInfo *LAI;
1690   /// Interface to emit optimization remarks.
1691   OptimizationRemarkEmitter *ORE;
1692 
1693   /// The interleave access information contains groups of interleaved accesses
1694   /// with the same stride and close to each other.
1695   InterleavedAccessInfo InterleaveInfo;
1696 
1697   //  ---  vectorization state --- //
1698 
1699   /// Holds the primary induction variable. This is the counter of the
1700   /// loop.
1701   PHINode *PrimaryInduction;
1702   /// Holds the reduction variables.
1703   ReductionList Reductions;
1704   /// Holds all of the induction variables that we found in the loop.
1705   /// Notice that inductions don't need to start at zero and that induction
1706   /// variables can be pointers.
1707   InductionList Inductions;
1708   /// Holds the phi nodes that are first-order recurrences.
1709   RecurrenceSet FirstOrderRecurrences;
1710   /// Holds instructions that need to sink past other instructions to handle
1711   /// first-order recurrences.
1712   DenseMap<Instruction *, Instruction *> SinkAfter;
1713   /// Holds the widest induction type encountered.
1714   Type *WidestIndTy;
1715 
1716   /// Allowed outside users. This holds the induction and reduction
1717   /// vars which can be accessed from outside the loop.
1718   SmallPtrSet<Value *, 4> AllowedExit;
1719 
1720   /// Can we assume the absence of NaNs.
1721   bool HasFunNoNaNAttr;
1722 
1723   /// Vectorization requirements that will go through late-evaluation.
1724   LoopVectorizationRequirements *Requirements;
1725 
1726   /// Used to emit an analysis of any legality issues.
1727   LoopVectorizeHints *Hints;
1728 
1729   /// While vectorizing these instructions we have to generate a
1730   /// call to the appropriate masked intrinsic
1731   SmallPtrSet<const Instruction *, 8> MaskedOp;
1732 };
1733 
1734 /// LoopVectorizationCostModel - estimates the expected speedups due to
1735 /// vectorization.
1736 /// In many cases vectorization is not profitable. This can happen because of
1737 /// a number of reasons. In this class we mainly attempt to predict the
1738 /// expected speedup/slowdowns due to the supported instruction set. We use the
1739 /// TargetTransformInfo to query the different backends for the cost of
1740 /// different operations.
1741 class LoopVectorizationCostModel {
1742 public:
1743   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1744                              LoopInfo *LI, LoopVectorizationLegality *Legal,
1745                              const TargetTransformInfo &TTI,
1746                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1747                              AssumptionCache *AC,
1748                              OptimizationRemarkEmitter *ORE, const Function *F,
1749                              const LoopVectorizeHints *Hints)
1750       : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1751         AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}
1752 
1753   /// \return An upper bound for the vectorization factor, or None if
1754   /// vectorization should be avoided up front.
1755   Optional<unsigned> computeMaxVF(bool OptForSize);
1756 
1757   /// Information about vectorization costs
1758   struct VectorizationFactor {
1759     unsigned Width; // Vector width with best cost
1760     unsigned Cost;  // Cost of the loop with that width
1761   };
1762   /// \return The most profitable vectorization factor and the cost of that VF.
1763   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1764   /// then this vectorization factor will be selected if vectorization is
1765   /// possible.
1766   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1767 
1768   /// Setup cost-based decisions for user vectorization factor.
1769   void selectUserVectorizationFactor(unsigned UserVF) {
1770     collectUniformsAndScalars(UserVF);
1771     collectInstsToScalarize(UserVF);
1772   }
1773 
1774   /// \return The size (in bits) of the smallest and widest types in the code
1775   /// that needs to be vectorized. We ignore values that remain scalar such as
1776   /// 64 bit loop indices.
1777   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1778 
1779   /// \return The desired interleave count.
1780   /// If interleave count has been specified by metadata it will be returned.
1781   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1782   /// are the selected vectorization factor and the cost of the selected VF.
1783   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1784                                  unsigned LoopCost);
1785 
1786   /// Memory access instruction may be vectorized in more than one way.
1787   /// Form of instruction after vectorization depends on cost.
1788   /// This function takes cost-based decisions for Load/Store instructions
1789   /// and collects them in a map. This decisions map is used for building
1790   /// the lists of loop-uniform and loop-scalar instructions.
1791   /// The calculated cost is saved with widening decision in order to
1792   /// avoid redundant calculations.
1793   void setCostBasedWideningDecision(unsigned VF);
1794 
1795   /// \brief A struct that represents some properties of the register usage
1796   /// of a loop.
1797   struct RegisterUsage {
1798     /// Holds the number of loop invariant values that are used in the loop.
1799     unsigned LoopInvariantRegs;
1800     /// Holds the maximum number of concurrent live intervals in the loop.
1801     unsigned MaxLocalUsers;
1802     /// Holds the number of instructions in the loop.
1803     unsigned NumInstructions;
1804   };
1805 
1806   /// \return Returns information about the register usages of the loop for the
1807   /// given vectorization factors.
1808   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1809 
1810   /// Collect values we want to ignore in the cost model.
1811   void collectValuesToIgnore();
1812 
1813   /// \returns The smallest bitwidth each instruction can be represented with.
1814   /// The vector equivalents of these instructions should be truncated to this
1815   /// type.
1816   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1817     return MinBWs;
1818   }
1819 
1820   /// \returns True if it is more profitable to scalarize instruction \p I for
1821   /// vectorization factor \p VF.
1822   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1823     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1824     auto Scalars = InstsToScalarize.find(VF);
1825     assert(Scalars != InstsToScalarize.end() &&
1826            "VF not yet analyzed for scalarization profitability");
1827     return Scalars->second.count(I);
1828   }
1829 
1830   /// Returns true if \p I is known to be uniform after vectorization.
1831   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1832     if (VF == 1)
1833       return true;
1834     assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1835     auto UniformsPerVF = Uniforms.find(VF);
1836     return UniformsPerVF->second.count(I);
1837   }
1838 
1839   /// Returns true if \p I is known to be scalar after vectorization.
1840   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1841     if (VF == 1)
1842       return true;
1843     assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1844     auto ScalarsPerVF = Scalars.find(VF);
1845     return ScalarsPerVF->second.count(I);
1846   }
1847 
1848   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1849   /// for vectorization factor \p VF.
1850   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1851     return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1852            !isScalarAfterVectorization(I, VF);
1853   }
1854 
1855   /// Decision that was taken during cost calculation for memory instruction.
1856   enum InstWidening {
1857     CM_Unknown,
1858     CM_Widen,
1859     CM_Interleave,
1860     CM_GatherScatter,
1861     CM_Scalarize
1862   };
1863 
1864   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1865   /// instruction \p I and vector width \p VF.
1866   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1867                            unsigned Cost) {
1868     assert(VF >= 2 && "Expected VF >=2");
1869     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1870   }
1871 
1872   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1873   /// interleaving group \p Grp and vector width \p VF.
1874   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1875                            InstWidening W, unsigned Cost) {
1876     assert(VF >= 2 && "Expected VF >=2");
1877     /// Broadcast this decicion to all instructions inside the group.
1878     /// But the cost will be assigned to one instruction only.
1879     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1880       if (auto *I = Grp->getMember(i)) {
1881         if (Grp->getInsertPos() == I)
1882           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1883         else
1884           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1885       }
1886     }
1887   }
1888 
1889   /// Return the cost model decision for the given instruction \p I and vector
1890   /// width \p VF. Return CM_Unknown if this instruction did not pass
1891   /// through the cost modeling.
1892   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1893     assert(VF >= 2 && "Expected VF >=2");
1894     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1895     auto Itr = WideningDecisions.find(InstOnVF);
1896     if (Itr == WideningDecisions.end())
1897       return CM_Unknown;
1898     return Itr->second.first;
1899   }
1900 
1901   /// Return the vectorization cost for the given instruction \p I and vector
1902   /// width \p VF.
1903   unsigned getWideningCost(Instruction *I, unsigned VF) {
1904     assert(VF >= 2 && "Expected VF >=2");
1905     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1906     assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
1907     return WideningDecisions[InstOnVF].second;
1908   }
1909 
1910   /// Return True if instruction \p I is an optimizable truncate whose operand
1911   /// is an induction variable. Such a truncate will be removed by adding a new
1912   /// induction variable with the destination type.
1913   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1914 
1915     // If the instruction is not a truncate, return false.
1916     auto *Trunc = dyn_cast<TruncInst>(I);
1917     if (!Trunc)
1918       return false;
1919 
1920     // Get the source and destination types of the truncate.
1921     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1922     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1923 
1924     // If the truncate is free for the given types, return false. Replacing a
1925     // free truncate with an induction variable would add an induction variable
1926     // update instruction to each iteration of the loop. We exclude from this
1927     // check the primary induction variable since it will need an update
1928     // instruction regardless.
1929     Value *Op = Trunc->getOperand(0);
1930     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1931       return false;
1932 
1933     // If the truncated value is not an induction variable, return false.
1934     return Legal->isInductionVariable(Op);
1935   }
1936 
1937   /// Collects the instructions to scalarize for each predicated instruction in
1938   /// the loop.
1939   void collectInstsToScalarize(unsigned VF);
1940 
1941   /// Collect Uniform and Scalar values for the given \p VF.
1942   /// The sets depend on CM decision for Load/Store instructions
1943   /// that may be vectorized as interleave, gather-scatter or scalarized.
1944   void collectUniformsAndScalars(unsigned VF) {
1945     // Do the analysis once.
1946     if (VF == 1 || Uniforms.count(VF))
1947       return;
1948     setCostBasedWideningDecision(VF);
1949     collectLoopUniforms(VF);
1950     collectLoopScalars(VF);
1951   }
1952 
1953 private:
1954   /// \return An upper bound for the vectorization factor, larger than zero.
1955   /// One is returned if vectorization should best be avoided due to cost.
1956   unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);
1957 
1958   /// The vectorization cost is a combination of the cost itself and a boolean
1959   /// indicating whether any of the contributing operations will actually
1960   /// operate on
1961   /// vector values after type legalization in the backend. If this latter value
1962   /// is
1963   /// false, then all operations will be scalarized (i.e. no vectorization has
1964   /// actually taken place).
1965   typedef std::pair<unsigned, bool> VectorizationCostTy;
1966 
1967   /// Returns the expected execution cost. The unit of the cost does
1968   /// not matter because we use the 'cost' units to compare different
1969   /// vector widths. The cost that is returned is *not* normalized by
1970   /// the factor width.
1971   VectorizationCostTy expectedCost(unsigned VF);
1972 
1973   /// Returns the execution time cost of an instruction for a given vector
1974   /// width. Vector width of one means scalar.
1975   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1976 
1977   /// The cost-computation logic from getInstructionCost which provides
1978   /// the vector type as an output parameter.
1979   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1980 
1981   /// Calculate vectorization cost of memory instruction \p I.
1982   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1983 
1984   /// The cost computation for scalarized memory instruction.
1985   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1986 
1987   /// The cost computation for interleaving group of memory instructions.
1988   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1989 
1990   /// The cost computation for Gather/Scatter instruction.
1991   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1992 
1993   /// The cost computation for widening instruction \p I with consecutive
1994   /// memory access.
1995   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1996 
1997   /// The cost calculation for Load instruction \p I with uniform pointer -
1998   /// scalar load + broadcast.
1999   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
2000 
2001   /// Returns whether the instruction is a load or store and will be a emitted
2002   /// as a vector operation.
2003   bool isConsecutiveLoadOrStore(Instruction *I);
2004 
2005   /// Create an analysis remark that explains why vectorization failed
2006   ///
2007   /// \p RemarkName is the identifier for the remark.  \return the remark object
2008   /// that can be streamed to.
2009   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
2010     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
2011                                   RemarkName, TheLoop);
2012   }
2013 
2014   /// Map of scalar integer values to the smallest bitwidth they can be legally
2015   /// represented as. The vector equivalents of these values should be truncated
2016   /// to this type.
2017   MapVector<Instruction *, uint64_t> MinBWs;
2018 
2019   /// A type representing the costs for instructions if they were to be
2020   /// scalarized rather than vectorized. The entries are Instruction-Cost
2021   /// pairs.
2022   typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;
2023 
2024   /// A set containing all BasicBlocks that are known to present after
2025   /// vectorization as a predicated block.
2026   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
2027 
2028   /// A map holding scalar costs for different vectorization factors. The
2029   /// presence of a cost for an instruction in the mapping indicates that the
2030   /// instruction will be scalarized when vectorizing with the associated
2031   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
2032   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
2033 
2034   /// Holds the instructions known to be uniform after vectorization.
2035   /// The data is collected per VF.
2036   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
2037 
2038   /// Holds the instructions known to be scalar after vectorization.
2039   /// The data is collected per VF.
2040   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
2041 
2042   /// Holds the instructions (address computations) that are forced to be
2043   /// scalarized.
2044   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
2045 
2046   /// Returns the expected difference in cost from scalarizing the expression
2047   /// feeding a predicated instruction \p PredInst. The instructions to
2048   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
2049   /// non-negative return value implies the expression will be scalarized.
2050   /// Currently, only single-use chains are considered for scalarization.
2051   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
2052                               unsigned VF);
2053 
2054   /// Collect the instructions that are uniform after vectorization. An
2055   /// instruction is uniform if we represent it with a single scalar value in
2056   /// the vectorized loop corresponding to each vector iteration. Examples of
2057   /// uniform instructions include pointer operands of consecutive or
2058   /// interleaved memory accesses. Note that although uniformity implies an
2059   /// instruction will be scalar, the reverse is not true. In general, a
2060   /// scalarized instruction will be represented by VF scalar values in the
2061   /// vectorized loop, each corresponding to an iteration of the original
2062   /// scalar loop.
2063   void collectLoopUniforms(unsigned VF);
2064 
2065   /// Collect the instructions that are scalar after vectorization. An
2066   /// instruction is scalar if it is known to be uniform or will be scalarized
2067   /// during vectorization. Non-uniform scalarized instructions will be
2068   /// represented by VF values in the vectorized loop, each corresponding to an
2069   /// iteration of the original scalar loop.
2070   void collectLoopScalars(unsigned VF);
2071 
2072   /// Keeps cost model vectorization decision and cost for instructions.
2073   /// Right now it is used for memory instructions only.
2074   typedef DenseMap<std::pair<Instruction *, unsigned>,
2075                    std::pair<InstWidening, unsigned>>
2076       DecisionList;
2077 
2078   DecisionList WideningDecisions;
2079 
2080 public:
2081   /// The loop that we evaluate.
2082   Loop *TheLoop;
2083   /// Predicated scalar evolution analysis.
2084   PredicatedScalarEvolution &PSE;
2085   /// Loop Info analysis.
2086   LoopInfo *LI;
2087   /// Vectorization legality.
2088   LoopVectorizationLegality *Legal;
2089   /// Vector target information.
2090   const TargetTransformInfo &TTI;
2091   /// Target Library Info.
2092   const TargetLibraryInfo *TLI;
2093   /// Demanded bits analysis.
2094   DemandedBits *DB;
2095   /// Assumption cache.
2096   AssumptionCache *AC;
2097   /// Interface to emit optimization remarks.
2098   OptimizationRemarkEmitter *ORE;
2099 
2100   const Function *TheFunction;
2101   /// Loop Vectorize Hint.
2102   const LoopVectorizeHints *Hints;
2103   /// Values to ignore in the cost model.
2104   SmallPtrSet<const Value *, 16> ValuesToIgnore;
2105   /// Values to ignore in the cost model when VF > 1.
2106   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
2107 };
2108 
2109 } // end anonymous namespace
2110 
2111 namespace llvm {
2112 /// InnerLoopVectorizer vectorizes loops which contain only one basic
2113 /// LoopVectorizationPlanner - drives the vectorization process after having
2114 /// passed Legality checks.
2115 /// The planner builds and optimizes the Vectorization Plans which record the
2116 /// decisions how to vectorize the given loop. In particular, represent the
2117 /// control-flow of the vectorized version, the replication of instructions that
2118 /// are to be scalarized, and interleave access groups.
2119 class LoopVectorizationPlanner {
2120   /// The loop that we evaluate.
2121   Loop *OrigLoop;
2122 
2123   /// Loop Info analysis.
2124   LoopInfo *LI;
2125 
2126   /// Target Library Info.
2127   const TargetLibraryInfo *TLI;
2128 
2129   /// Target Transform Info.
2130   const TargetTransformInfo *TTI;
2131 
2132   /// The legality analysis.
2133   LoopVectorizationLegality *Legal;
2134 
2135   /// The profitablity analysis.
2136   LoopVectorizationCostModel &CM;
2137 
2138   SmallVector<VPlan *, 4> VPlans;
2139 
2140   unsigned BestVF;
2141   unsigned BestUF;
2142 
2143 public:
2144   LoopVectorizationPlanner(Loop *L, LoopInfo *LI, const TargetLibraryInfo *TLI,
2145                            const TargetTransformInfo *TTI,
2146                            LoopVectorizationLegality *Legal,
2147                            LoopVectorizationCostModel &CM)
2148       : OrigLoop(L), LI(LI), TLI(TLI), TTI(TTI), Legal(Legal), CM(CM),
2149         BestVF(0), BestUF(0) {}
2150 
2151   ~LoopVectorizationPlanner() {
2152     while (!VPlans.empty()) {
2153       VPlan *Plan = VPlans.back();
2154       VPlans.pop_back();
2155       delete Plan;
2156     }
2157   }
2158 
2159   /// Plan how to best vectorize, return the best VF and its cost.
2160   LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
2161                                                        unsigned UserVF);
2162 
2163   /// Finalize the best decision and dispose of all other VPlans.
2164   void setBestPlan(unsigned VF, unsigned UF);
2165 
2166   /// Generate the IR code for the body of the vectorized loop according to the
2167   /// best selected VPlan.
2168   void executePlan(InnerLoopVectorizer &LB, DominatorTree *DT);
2169 
2170   void printPlans(raw_ostream &O) {
2171     for (VPlan *Plan : VPlans)
2172       O << *Plan;
2173   }
2174 
2175 protected:
2176   /// Collect the instructions from the original loop that would be trivially
2177   /// dead in the vectorized loop if generated.
2178   void collectTriviallyDeadInstructions(
2179       SmallPtrSetImpl<Instruction *> &DeadInstructions);
2180 
2181   /// A range of powers-of-2 vectorization factors with fixed start and
2182   /// adjustable end. The range includes start and excludes end, e.g.,:
2183   /// [1, 9) = {1, 2, 4, 8}
2184   struct VFRange {
2185     const unsigned Start; // A power of 2.
2186     unsigned End; // Need not be a power of 2. If End <= Start range is empty.
2187   };
2188 
2189   /// Test a \p Predicate on a \p Range of VF's. Return the value of applying
2190   /// \p Predicate on Range.Start, possibly decreasing Range.End such that the
2191   /// returned value holds for the entire \p Range.
2192   bool getDecisionAndClampRange(const std::function<bool(unsigned)> &Predicate,
2193                                 VFRange &Range);
2194 
2195   /// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive,
2196   /// according to the information gathered by Legal when it checked if it is
2197   /// legal to vectorize the loop.
2198   void buildVPlans(unsigned MinVF, unsigned MaxVF);
2199 
2200 private:
2201   /// Check if \I belongs to an Interleave Group within the given VF \p Range,
2202   /// \return true in the first returned value if so and false otherwise.
2203   /// Build a new VPInterleaveGroup Recipe if \I is the primary member of an IG
2204   /// for \p Range.Start, and provide it as the second returned value.
2205   /// Note that if \I is an adjunct member of an IG for \p Range.Start, the
2206   /// \return value is <true, nullptr>, as it is handled by another recipe.
2207   /// \p Range.End may be decreased to ensure same decision from \p Range.Start
2208   /// to \p Range.End.
2209   VPInterleaveRecipe *tryToInterleaveMemory(Instruction *I, VFRange &Range);
2210 
2211   /// Check if an induction recipe should be constructed for \I within the given
2212   /// VF \p Range. If so build and return it. If not, return null. \p Range.End
2213   /// may be decreased to ensure same decision from \p Range.Start to
2214   /// \p Range.End.
2215   VPWidenIntOrFpInductionRecipe *tryToOptimizeInduction(Instruction *I,
2216                                                         VFRange &Range);
2217 
2218   /// Check if \I can be widened within the given VF \p Range. If \I can be
2219   /// widened for Range.Start, extend \p LastWidenRecipe to include \p I if
2220   /// possible or else build a new VPWidenRecipe for it, and return the
2221   /// VPWidenRecipe that includes \p I. If \p I cannot be widened for
2222   /// Range.Start \return null. Range.End may be decreased to ensure same
2223   /// decision from \p Range.Start to \p Range.End.
2224   VPWidenRecipe *tryToWiden(Instruction *I, VPWidenRecipe *LastWidenRecipe,
2225                             VFRange &Range);
2226 
2227   /// Build a VPReplicationRecipe for \p I and enclose it within a Region if it
2228   /// is predicated. \return \p VPBB augmented with this new recipe if \p I is
2229   /// not predicated, otherwise \return a new VPBasicBlock that succeeds the new
2230   /// Region. Update the packing decision of predicated instructions if they
2231   /// feed \p I. Range.End may be decreased to ensure same recipe behavior from
2232   /// \p Range.Start to \p Range.End.
2233   VPBasicBlock *handleReplication(
2234       Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
2235       DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe);
2236 
2237   /// Create a replicating region for instruction \p I that requires
2238   /// predication. \p PredRecipe is a VPReplicateRecipe holding \p I.
2239   VPRegionBlock *createReplicateRegion(Instruction *I,
2240                                        VPRecipeBase *PredRecipe);
2241 
2242   /// Build a VPlan according to the information gathered by Legal. \return a
2243   /// VPlan for vectorization factors \p Range.Start and up to \p Range.End
2244   /// exclusive, possibly decreasing \p Range.End.
2245   VPlan *buildVPlan(VFRange &Range);
2246 };
2247 
2248 } // namespace llvm
2249 
2250 namespace {
2251 
2252 /// \brief This holds vectorization requirements that must be verified late in
2253 /// the process. The requirements are set by legalize and costmodel. Once
2254 /// vectorization has been determined to be possible and profitable the
2255 /// requirements can be verified by looking for metadata or compiler options.
2256 /// For example, some loops require FP commutativity which is only allowed if
2257 /// vectorization is explicitly specified or if the fast-math compiler option
2258 /// has been provided.
2259 /// Late evaluation of these requirements allows helpful diagnostics to be
2260 /// composed that tells the user what need to be done to vectorize the loop. For
2261 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
2262 /// evaluation should be used only when diagnostics can generated that can be
2263 /// followed by a non-expert user.
2264 class LoopVectorizationRequirements {
2265 public:
2266   LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
2267       : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
2268 
2269   void addUnsafeAlgebraInst(Instruction *I) {
2270     // First unsafe algebra instruction.
2271     if (!UnsafeAlgebraInst)
2272       UnsafeAlgebraInst = I;
2273   }
2274 
2275   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2276 
2277   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2278     const char *PassName = Hints.vectorizeAnalysisPassName();
2279     bool Failed = false;
2280     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
2281       ORE.emit(
2282           OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
2283                                               UnsafeAlgebraInst->getDebugLoc(),
2284                                               UnsafeAlgebraInst->getParent())
2285           << "loop not vectorized: cannot prove it is safe to reorder "
2286              "floating-point operations");
2287       Failed = true;
2288     }
2289 
2290     // Test if runtime memcheck thresholds are exceeded.
2291     bool PragmaThresholdReached =
2292         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
2293     bool ThresholdReached =
2294         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
2295     if ((ThresholdReached && !Hints.allowReordering()) ||
2296         PragmaThresholdReached) {
2297       ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
2298                                                   L->getStartLoc(),
2299                                                   L->getHeader())
2300                << "loop not vectorized: cannot prove it is safe to reorder "
2301                   "memory operations");
2302       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
2303       Failed = true;
2304     }
2305 
2306     return Failed;
2307   }
2308 
2309 private:
2310   unsigned NumRuntimePointerChecks;
2311   Instruction *UnsafeAlgebraInst;
2312 
2313   /// Interface to emit optimization remarks.
2314   OptimizationRemarkEmitter &ORE;
2315 };
2316 
2317 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
2318   if (L.empty()) {
2319     if (!hasCyclesInLoopBody(L))
2320       V.push_back(&L);
2321     return;
2322   }
2323   for (Loop *InnerL : L)
2324     addAcyclicInnerLoop(*InnerL, V);
2325 }
2326 
2327 /// The LoopVectorize Pass.
2328 struct LoopVectorize : public FunctionPass {
2329   /// Pass identification, replacement for typeid
2330   static char ID;
2331 
2332   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
2333       : FunctionPass(ID) {
2334     Impl.DisableUnrolling = NoUnrolling;
2335     Impl.AlwaysVectorize = AlwaysVectorize;
2336     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2337   }
2338 
2339   LoopVectorizePass Impl;
2340 
2341   bool runOnFunction(Function &F) override {
2342     if (skipFunction(F))
2343       return false;
2344 
2345     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2346     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2347     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2348     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2349     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2350     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2351     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
2352     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2353     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2354     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2355     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2356     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2357 
2358     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2359         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2360 
2361     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2362                         GetLAA, *ORE);
2363   }
2364 
2365   void getAnalysisUsage(AnalysisUsage &AU) const override {
2366     AU.addRequired<AssumptionCacheTracker>();
2367     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2368     AU.addRequired<DominatorTreeWrapperPass>();
2369     AU.addRequired<LoopInfoWrapperPass>();
2370     AU.addRequired<ScalarEvolutionWrapperPass>();
2371     AU.addRequired<TargetTransformInfoWrapperPass>();
2372     AU.addRequired<AAResultsWrapperPass>();
2373     AU.addRequired<LoopAccessLegacyAnalysis>();
2374     AU.addRequired<DemandedBitsWrapperPass>();
2375     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2376     AU.addPreserved<LoopInfoWrapperPass>();
2377     AU.addPreserved<DominatorTreeWrapperPass>();
2378     AU.addPreserved<BasicAAWrapperPass>();
2379     AU.addPreserved<GlobalsAAWrapperPass>();
2380   }
2381 };
2382 
2383 } // end anonymous namespace
2384 
2385 //===----------------------------------------------------------------------===//
2386 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2387 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2388 //===----------------------------------------------------------------------===//
2389 
2390 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2391   // We need to place the broadcast of invariant variables outside the loop.
2392   Instruction *Instr = dyn_cast<Instruction>(V);
2393   bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
2394   bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2395 
2396   // Place the code for broadcasting invariant variables in the new preheader.
2397   IRBuilder<>::InsertPointGuard Guard(Builder);
2398   if (Invariant)
2399     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2400 
2401   // Broadcast the scalar into all locations in the vector.
2402   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2403 
2404   return Shuf;
2405 }
2406 
2407 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2408     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
2409   Value *Start = II.getStartValue();
2410 
2411   // Construct the initial value of the vector IV in the vector loop preheader
2412   auto CurrIP = Builder.saveIP();
2413   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2414   if (isa<TruncInst>(EntryVal)) {
2415     assert(Start->getType()->isIntegerTy() &&
2416            "Truncation requires an integer type");
2417     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2418     Step = Builder.CreateTrunc(Step, TruncType);
2419     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2420   }
2421   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2422   Value *SteppedStart =
2423       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2424 
2425   // We create vector phi nodes for both integer and floating-point induction
2426   // variables. Here, we determine the kind of arithmetic we will perform.
2427   Instruction::BinaryOps AddOp;
2428   Instruction::BinaryOps MulOp;
2429   if (Step->getType()->isIntegerTy()) {
2430     AddOp = Instruction::Add;
2431     MulOp = Instruction::Mul;
2432   } else {
2433     AddOp = II.getInductionOpcode();
2434     MulOp = Instruction::FMul;
2435   }
2436 
2437   // Multiply the vectorization factor by the step using integer or
2438   // floating-point arithmetic as appropriate.
2439   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
2440   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2441 
2442   // Create a vector splat to use in the induction update.
2443   //
2444   // FIXME: If the step is non-constant, we create the vector splat with
2445   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2446   //        handle a constant vector splat.
2447   Value *SplatVF = isa<Constant>(Mul)
2448                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2449                        : Builder.CreateVectorSplat(VF, Mul);
2450   Builder.restoreIP(CurrIP);
2451 
2452   // We may need to add the step a number of times, depending on the unroll
2453   // factor. The last of those goes into the PHI.
2454   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2455                                     &*LoopVectorBody->getFirstInsertionPt());
2456   Instruction *LastInduction = VecInd;
2457   for (unsigned Part = 0; Part < UF; ++Part) {
2458     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
2459     if (isa<TruncInst>(EntryVal))
2460       addMetadata(LastInduction, EntryVal);
2461     LastInduction = cast<Instruction>(addFastMathFlag(
2462         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2463   }
2464 
2465   // Move the last step to the end of the latch block. This ensures consistent
2466   // placement of all induction updates.
2467   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2468   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2469   auto *ICmp = cast<Instruction>(Br->getCondition());
2470   LastInduction->moveBefore(ICmp);
2471   LastInduction->setName("vec.ind.next");
2472 
2473   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2474   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2475 }
2476 
2477 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2478   return Cost->isScalarAfterVectorization(I, VF) ||
2479          Cost->isProfitableToScalarize(I, VF);
2480 }
2481 
2482 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2483   if (shouldScalarizeInstruction(IV))
2484     return true;
2485   auto isScalarInst = [&](User *U) -> bool {
2486     auto *I = cast<Instruction>(U);
2487     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2488   };
2489   return any_of(IV->users(), isScalarInst);
2490 }
2491 
2492 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
2493 
2494   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2495          "Primary induction variable must have an integer type");
2496 
2497   auto II = Legal->getInductionVars()->find(IV);
2498   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
2499 
2500   auto ID = II->second;
2501   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2502 
2503   // The scalar value to broadcast. This will be derived from the canonical
2504   // induction variable.
2505   Value *ScalarIV = nullptr;
2506 
2507   // The value from the original loop to which we are mapping the new induction
2508   // variable.
2509   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2510 
2511   // True if we have vectorized the induction variable.
2512   auto VectorizedIV = false;
2513 
2514   // Determine if we want a scalar version of the induction variable. This is
2515   // true if the induction variable itself is not widened, or if it has at
2516   // least one user in the loop that is not widened.
2517   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
2518 
2519   // Generate code for the induction step. Note that induction steps are
2520   // required to be loop-invariant
2521   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
2522          "Induction step should be loop invariant");
2523   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2524   Value *Step = nullptr;
2525   if (PSE.getSE()->isSCEVable(IV->getType())) {
2526     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2527     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
2528                              LoopVectorPreHeader->getTerminator());
2529   } else {
2530     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
2531   }
2532 
2533   // Try to create a new independent vector induction variable. If we can't
2534   // create the phi node, we will splat the scalar induction variable in each
2535   // loop iteration.
2536   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
2537     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2538     VectorizedIV = true;
2539   }
2540 
2541   // If we haven't yet vectorized the induction variable, or if we will create
2542   // a scalar one, we need to define the scalar induction variable and step
2543   // values. If we were given a truncation type, truncate the canonical
2544   // induction variable and step. Otherwise, derive these values from the
2545   // induction descriptor.
2546   if (!VectorizedIV || NeedsScalarIV) {
2547     ScalarIV = Induction;
2548     if (IV != OldInduction) {
2549       ScalarIV = IV->getType()->isIntegerTy()
2550                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2551                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2552                                           IV->getType());
2553       ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
2554       ScalarIV->setName("offset.idx");
2555     }
2556     if (Trunc) {
2557       auto *TruncType = cast<IntegerType>(Trunc->getType());
2558       assert(Step->getType()->isIntegerTy() &&
2559              "Truncation requires an integer step");
2560       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2561       Step = Builder.CreateTrunc(Step, TruncType);
2562     }
2563   }
2564 
2565   // If we haven't yet vectorized the induction variable, splat the scalar
2566   // induction variable, and build the necessary step vectors.
2567   if (!VectorizedIV) {
2568     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2569     for (unsigned Part = 0; Part < UF; ++Part) {
2570       Value *EntryPart =
2571           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
2572       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
2573       if (Trunc)
2574         addMetadata(EntryPart, Trunc);
2575     }
2576   }
2577 
2578   // If an induction variable is only used for counting loop iterations or
2579   // calculating addresses, it doesn't need to be widened. Create scalar steps
2580   // that can be used by instructions we will later scalarize. Note that the
2581   // addition of the scalar steps will not increase the number of instructions
2582   // in the loop in the common case prior to InstCombine. We will be trading
2583   // one vector extract for each scalar step.
2584   if (NeedsScalarIV)
2585     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
2586 }
2587 
2588 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2589                                           Instruction::BinaryOps BinOp) {
2590   // Create and check the types.
2591   assert(Val->getType()->isVectorTy() && "Must be a vector");
2592   int VLen = Val->getType()->getVectorNumElements();
2593 
2594   Type *STy = Val->getType()->getScalarType();
2595   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2596          "Induction Step must be an integer or FP");
2597   assert(Step->getType() == STy && "Step has wrong type");
2598 
2599   SmallVector<Constant *, 8> Indices;
2600 
2601   if (STy->isIntegerTy()) {
2602     // Create a vector of consecutive numbers from zero to VF.
2603     for (int i = 0; i < VLen; ++i)
2604       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2605 
2606     // Add the consecutive indices to the vector value.
2607     Constant *Cv = ConstantVector::get(Indices);
2608     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2609     Step = Builder.CreateVectorSplat(VLen, Step);
2610     assert(Step->getType() == Val->getType() && "Invalid step vec");
2611     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2612     // which can be found from the original scalar operations.
2613     Step = Builder.CreateMul(Cv, Step);
2614     return Builder.CreateAdd(Val, Step, "induction");
2615   }
2616 
2617   // Floating point induction.
2618   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2619          "Binary Opcode should be specified for FP induction");
2620   // Create a vector of consecutive numbers from zero to VF.
2621   for (int i = 0; i < VLen; ++i)
2622     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2623 
2624   // Add the consecutive indices to the vector value.
2625   Constant *Cv = ConstantVector::get(Indices);
2626 
2627   Step = Builder.CreateVectorSplat(VLen, Step);
2628 
2629   // Floating point operations had to be 'fast' to enable the induction.
2630   FastMathFlags Flags;
2631   Flags.setUnsafeAlgebra();
2632 
2633   Value *MulOp = Builder.CreateFMul(Cv, Step);
2634   if (isa<Instruction>(MulOp))
2635     // Have to check, MulOp may be a constant
2636     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2637 
2638   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2639   if (isa<Instruction>(BOp))
2640     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2641   return BOp;
2642 }
2643 
2644 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2645                                            Value *EntryVal,
2646                                            const InductionDescriptor &ID) {
2647 
2648   // We shouldn't have to build scalar steps if we aren't vectorizing.
2649   assert(VF > 1 && "VF should be greater than one");
2650 
2651   // Get the value type and ensure it and the step have the same integer type.
2652   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2653   assert(ScalarIVTy == Step->getType() &&
2654          "Val and Step should have the same type");
2655 
2656   // We build scalar steps for both integer and floating-point induction
2657   // variables. Here, we determine the kind of arithmetic we will perform.
2658   Instruction::BinaryOps AddOp;
2659   Instruction::BinaryOps MulOp;
2660   if (ScalarIVTy->isIntegerTy()) {
2661     AddOp = Instruction::Add;
2662     MulOp = Instruction::Mul;
2663   } else {
2664     AddOp = ID.getInductionOpcode();
2665     MulOp = Instruction::FMul;
2666   }
2667 
2668   // Determine the number of scalars we need to generate for each unroll
2669   // iteration. If EntryVal is uniform, we only need to generate the first
2670   // lane. Otherwise, we generate all VF values.
2671   unsigned Lanes =
2672       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2673                                                                          : VF;
2674   // Compute the scalar steps and save the results in VectorLoopValueMap.
2675   for (unsigned Part = 0; Part < UF; ++Part) {
2676     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2677       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2678       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2679       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2680       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2681     }
2682   }
2683 }
2684 
2685 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
2686 
2687   const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() :
2688     ValueToValueMap();
2689 
2690   int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
2691   if (Stride == 1 || Stride == -1)
2692     return Stride;
2693   return 0;
2694 }
2695 
2696 bool LoopVectorizationLegality::isUniform(Value *V) {
2697   return LAI->isUniform(V);
2698 }
2699 
2700 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2701   assert(V != Induction && "The new induction variable should not be used.");
2702   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2703   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2704 
2705   // If we have a stride that is replaced by one, do it here.
2706   if (Legal->hasStride(V))
2707     V = ConstantInt::get(V->getType(), 1);
2708 
2709   // If we have a vector mapped to this value, return it.
2710   if (VectorLoopValueMap.hasVectorValue(V, Part))
2711     return VectorLoopValueMap.getVectorValue(V, Part);
2712 
2713   // If the value has not been vectorized, check if it has been scalarized
2714   // instead. If it has been scalarized, and we actually need the value in
2715   // vector form, we will construct the vector values on demand.
2716   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2717 
2718     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2719 
2720     // If we've scalarized a value, that value should be an instruction.
2721     auto *I = cast<Instruction>(V);
2722 
2723     // If we aren't vectorizing, we can just copy the scalar map values over to
2724     // the vector map.
2725     if (VF == 1) {
2726       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2727       return ScalarValue;
2728     }
2729 
2730     // Get the last scalar instruction we generated for V and Part. If the value
2731     // is known to be uniform after vectorization, this corresponds to lane zero
2732     // of the Part unroll iteration. Otherwise, the last instruction is the one
2733     // we created for the last vector lane of the Part unroll iteration.
2734     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2735     auto *LastInst = cast<Instruction>(
2736         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2737 
2738     // Set the insert point after the last scalarized instruction. This ensures
2739     // the insertelement sequence will directly follow the scalar definitions.
2740     auto OldIP = Builder.saveIP();
2741     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2742     Builder.SetInsertPoint(&*NewIP);
2743 
2744     // However, if we are vectorizing, we need to construct the vector values.
2745     // If the value is known to be uniform after vectorization, we can just
2746     // broadcast the scalar value corresponding to lane zero for each unroll
2747     // iteration. Otherwise, we construct the vector values using insertelement
2748     // instructions. Since the resulting vectors are stored in
2749     // VectorLoopValueMap, we will only generate the insertelements once.
2750     Value *VectorValue = nullptr;
2751     if (Cost->isUniformAfterVectorization(I, VF)) {
2752       VectorValue = getBroadcastInstrs(ScalarValue);
2753       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2754     } else {
2755       // Initialize packing with insertelements to start from undef.
2756       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2757       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2758       for (unsigned Lane = 0; Lane < VF; ++Lane)
2759         packScalarIntoVectorValue(V, {Part, Lane});
2760       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2761     }
2762     Builder.restoreIP(OldIP);
2763     return VectorValue;
2764   }
2765 
2766   // If this scalar is unknown, assume that it is a constant or that it is
2767   // loop invariant. Broadcast V and save the value for future uses.
2768   Value *B = getBroadcastInstrs(V);
2769   VectorLoopValueMap.setVectorValue(V, Part, B);
2770   return B;
2771 }
2772 
2773 Value *
2774 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2775                                             const VPIteration &Instance) {
2776   // If the value is not an instruction contained in the loop, it should
2777   // already be scalar.
2778   if (OrigLoop->isLoopInvariant(V))
2779     return V;
2780 
2781   assert(Instance.Lane > 0
2782              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2783              : true && "Uniform values only have lane zero");
2784 
2785   // If the value from the original loop has not been vectorized, it is
2786   // represented by UF x VF scalar values in the new loop. Return the requested
2787   // scalar value.
2788   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2789     return VectorLoopValueMap.getScalarValue(V, Instance);
2790 
2791   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2792   // for the given unroll part. If this entry is not a vector type (i.e., the
2793   // vectorization factor is one), there is no need to generate an
2794   // extractelement instruction.
2795   auto *U = getOrCreateVectorValue(V, Instance.Part);
2796   if (!U->getType()->isVectorTy()) {
2797     assert(VF == 1 && "Value not scalarized has non-vector type");
2798     return U;
2799   }
2800 
2801   // Otherwise, the value from the original loop has been vectorized and is
2802   // represented by UF vector values. Extract and return the requested scalar
2803   // value from the appropriate vector lane.
2804   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2805 }
2806 
2807 void InnerLoopVectorizer::packScalarIntoVectorValue(
2808     Value *V, const VPIteration &Instance) {
2809   assert(V != Induction && "The new induction variable should not be used.");
2810   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2811   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2812 
2813   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2814   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2815   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2816                                             Builder.getInt32(Instance.Lane));
2817   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2818 }
2819 
2820 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2821   assert(Vec->getType()->isVectorTy() && "Invalid type");
2822   SmallVector<Constant *, 8> ShuffleMask;
2823   for (unsigned i = 0; i < VF; ++i)
2824     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2825 
2826   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2827                                      ConstantVector::get(ShuffleMask),
2828                                      "reverse");
2829 }
2830 
2831 // Try to vectorize the interleave group that \p Instr belongs to.
2832 //
2833 // E.g. Translate following interleaved load group (factor = 3):
2834 //   for (i = 0; i < N; i+=3) {
2835 //     R = Pic[i];             // Member of index 0
2836 //     G = Pic[i+1];           // Member of index 1
2837 //     B = Pic[i+2];           // Member of index 2
2838 //     ... // do something to R, G, B
2839 //   }
2840 // To:
2841 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2842 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2843 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2844 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2845 //
2846 // Or translate following interleaved store group (factor = 3):
2847 //   for (i = 0; i < N; i+=3) {
2848 //     ... do something to R, G, B
2849 //     Pic[i]   = R;           // Member of index 0
2850 //     Pic[i+1] = G;           // Member of index 1
2851 //     Pic[i+2] = B;           // Member of index 2
2852 //   }
2853 // To:
2854 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2855 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2856 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2857 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2858 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2859 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2860   const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2861   assert(Group && "Fail to get an interleaved access group.");
2862 
2863   // Skip if current instruction is not the insert position.
2864   if (Instr != Group->getInsertPos())
2865     return;
2866 
2867   const DataLayout &DL = Instr->getModule()->getDataLayout();
2868   Value *Ptr = getPointerOperand(Instr);
2869 
2870   // Prepare for the vector type of the interleaved load/store.
2871   Type *ScalarTy = getMemInstValueType(Instr);
2872   unsigned InterleaveFactor = Group->getFactor();
2873   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2874   Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2875 
2876   // Prepare for the new pointers.
2877   setDebugLocFromInst(Builder, Ptr);
2878   SmallVector<Value *, 2> NewPtrs;
2879   unsigned Index = Group->getIndex(Instr);
2880 
2881   // If the group is reverse, adjust the index to refer to the last vector lane
2882   // instead of the first. We adjust the index from the first vector lane,
2883   // rather than directly getting the pointer for lane VF - 1, because the
2884   // pointer operand of the interleaved access is supposed to be uniform. For
2885   // uniform instructions, we're only required to generate a value for the
2886   // first vector lane in each unroll iteration.
2887   if (Group->isReverse())
2888     Index += (VF - 1) * Group->getFactor();
2889 
2890   for (unsigned Part = 0; Part < UF; Part++) {
2891     Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2892 
2893     // Notice current instruction could be any index. Need to adjust the address
2894     // to the member of index 0.
2895     //
2896     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2897     //       b = A[i];       // Member of index 0
2898     // Current pointer is pointed to A[i+1], adjust it to A[i].
2899     //
2900     // E.g.  A[i+1] = a;     // Member of index 1
2901     //       A[i]   = b;     // Member of index 0
2902     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2903     // Current pointer is pointed to A[i+2], adjust it to A[i].
2904     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2905 
2906     // Cast to the vector pointer type.
2907     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2908   }
2909 
2910   setDebugLocFromInst(Builder, Instr);
2911   Value *UndefVec = UndefValue::get(VecTy);
2912 
2913   // Vectorize the interleaved load group.
2914   if (isa<LoadInst>(Instr)) {
2915 
2916     // For each unroll part, create a wide load for the group.
2917     SmallVector<Value *, 2> NewLoads;
2918     for (unsigned Part = 0; Part < UF; Part++) {
2919       auto *NewLoad = Builder.CreateAlignedLoad(
2920           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2921       addMetadata(NewLoad, Instr);
2922       NewLoads.push_back(NewLoad);
2923     }
2924 
2925     // For each member in the group, shuffle out the appropriate data from the
2926     // wide loads.
2927     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2928       Instruction *Member = Group->getMember(I);
2929 
2930       // Skip the gaps in the group.
2931       if (!Member)
2932         continue;
2933 
2934       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2935       for (unsigned Part = 0; Part < UF; Part++) {
2936         Value *StridedVec = Builder.CreateShuffleVector(
2937             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2938 
2939         // If this member has different type, cast the result type.
2940         if (Member->getType() != ScalarTy) {
2941           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2942           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2943         }
2944 
2945         if (Group->isReverse())
2946           StridedVec = reverseVector(StridedVec);
2947 
2948         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2949       }
2950     }
2951     return;
2952   }
2953 
2954   // The sub vector type for current instruction.
2955   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2956 
2957   // Vectorize the interleaved store group.
2958   for (unsigned Part = 0; Part < UF; Part++) {
2959     // Collect the stored vector from each member.
2960     SmallVector<Value *, 4> StoredVecs;
2961     for (unsigned i = 0; i < InterleaveFactor; i++) {
2962       // Interleaved store group doesn't allow a gap, so each index has a member
2963       Instruction *Member = Group->getMember(i);
2964       assert(Member && "Fail to get a member from an interleaved store group");
2965 
2966       Value *StoredVec = getOrCreateVectorValue(
2967           cast<StoreInst>(Member)->getValueOperand(), Part);
2968       if (Group->isReverse())
2969         StoredVec = reverseVector(StoredVec);
2970 
2971       // If this member has different type, cast it to a unified type.
2972 
2973       if (StoredVec->getType() != SubVT)
2974         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2975 
2976       StoredVecs.push_back(StoredVec);
2977     }
2978 
2979     // Concatenate all vectors into a wide vector.
2980     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2981 
2982     // Interleave the elements in the wide vector.
2983     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2984     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2985                                               "interleaved.vec");
2986 
2987     Instruction *NewStoreInstr =
2988         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2989     addMetadata(NewStoreInstr, Instr);
2990   }
2991 }
2992 
2993 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2994   // Attempt to issue a wide load.
2995   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2996   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2997 
2998   assert((LI || SI) && "Invalid Load/Store instruction");
2999 
3000   LoopVectorizationCostModel::InstWidening Decision =
3001       Cost->getWideningDecision(Instr, VF);
3002   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
3003          "CM decision should be taken at this point");
3004   if (Decision == LoopVectorizationCostModel::CM_Interleave)
3005     return vectorizeInterleaveGroup(Instr);
3006 
3007   Type *ScalarDataTy = getMemInstValueType(Instr);
3008   Type *DataTy = VectorType::get(ScalarDataTy, VF);
3009   Value *Ptr = getPointerOperand(Instr);
3010   unsigned Alignment = getMemInstAlignment(Instr);
3011   // An alignment of 0 means target abi alignment. We need to use the scalar's
3012   // target abi alignment in such a case.
3013   const DataLayout &DL = Instr->getModule()->getDataLayout();
3014   if (!Alignment)
3015     Alignment = DL.getABITypeAlignment(ScalarDataTy);
3016   unsigned AddressSpace = getMemInstAddressSpace(Instr);
3017 
3018   // Determine if the pointer operand of the access is either consecutive or
3019   // reverse consecutive.
3020   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
3021   bool Reverse = ConsecutiveStride < 0;
3022   bool CreateGatherScatter =
3023       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
3024 
3025   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
3026   // gather/scatter. Otherwise Decision should have been to Scalarize.
3027   assert((ConsecutiveStride || CreateGatherScatter) &&
3028          "The instruction should be scalarized");
3029 
3030   // Handle consecutive loads/stores.
3031   if (ConsecutiveStride)
3032     Ptr = getOrCreateScalarValue(Ptr, {0, 0});
3033 
3034   VectorParts Mask = createBlockInMask(Instr->getParent());
3035   // Handle Stores:
3036   if (SI) {
3037     assert(!Legal->isUniform(SI->getPointerOperand()) &&
3038            "We do not allow storing to uniform addresses");
3039     setDebugLocFromInst(Builder, SI);
3040 
3041     for (unsigned Part = 0; Part < UF; ++Part) {
3042       Instruction *NewSI = nullptr;
3043       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
3044       if (CreateGatherScatter) {
3045         Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
3046         Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
3047         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
3048                                             MaskPart);
3049       } else {
3050         // Calculate the pointer for the specific unroll-part.
3051         Value *PartPtr =
3052             Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3053 
3054         if (Reverse) {
3055           // If we store to reverse consecutive memory locations, then we need
3056           // to reverse the order of elements in the stored value.
3057           StoredVal = reverseVector(StoredVal);
3058           // We don't want to update the value in the map as it might be used in
3059           // another expression. So don't call resetVectorValue(StoredVal).
3060 
3061           // If the address is consecutive but reversed, then the
3062           // wide store needs to start at the last vector element.
3063           PartPtr =
3064               Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3065           PartPtr =
3066               Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3067           if (Mask[Part]) // The reverse of a null all-one mask is a null mask.
3068             Mask[Part] = reverseVector(Mask[Part]);
3069         }
3070 
3071         Value *VecPtr =
3072             Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3073 
3074         if (Legal->isMaskRequired(SI) && Mask[Part])
3075           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
3076                                             Mask[Part]);
3077         else
3078           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
3079       }
3080       addMetadata(NewSI, SI);
3081     }
3082     return;
3083   }
3084 
3085   // Handle loads.
3086   assert(LI && "Must have a load instruction");
3087   setDebugLocFromInst(Builder, LI);
3088   for (unsigned Part = 0; Part < UF; ++Part) {
3089     Value *NewLI;
3090     if (CreateGatherScatter) {
3091       Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
3092       Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
3093       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
3094                                          nullptr, "wide.masked.gather");
3095       addMetadata(NewLI, LI);
3096     } else {
3097       // Calculate the pointer for the specific unroll-part.
3098       Value *PartPtr =
3099           Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3100 
3101       if (Reverse) {
3102         // If the address is consecutive but reversed, then the
3103         // wide load needs to start at the last vector element.
3104         PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3105         PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3106         if (Mask[Part]) // The reverse of a null all-one mask is a null mask.
3107           Mask[Part] = reverseVector(Mask[Part]);
3108       }
3109 
3110       Value *VecPtr =
3111           Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3112       if (Legal->isMaskRequired(LI) && Mask[Part])
3113         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
3114                                          UndefValue::get(DataTy),
3115                                          "wide.masked.load");
3116       else
3117         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
3118 
3119       // Add metadata to the load, but setVectorValue to the reverse shuffle.
3120       addMetadata(NewLI, LI);
3121       if (Reverse)
3122         NewLI = reverseVector(NewLI);
3123     }
3124     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
3125   }
3126 }
3127 
3128 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3129                                                const VPIteration &Instance,
3130                                                bool IfPredicateInstr) {
3131   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3132 
3133   setDebugLocFromInst(Builder, Instr);
3134 
3135   // Does this instruction return a value ?
3136   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3137 
3138   Instruction *Cloned = Instr->clone();
3139   if (!IsVoidRetTy)
3140     Cloned->setName(Instr->getName() + ".cloned");
3141 
3142   // Replace the operands of the cloned instructions with their scalar
3143   // equivalents in the new loop.
3144   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3145     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
3146     Cloned->setOperand(op, NewOp);
3147   }
3148   addNewMetadata(Cloned, Instr);
3149 
3150   // Place the cloned scalar in the new loop.
3151   Builder.Insert(Cloned);
3152 
3153   // Add the cloned scalar to the scalar map entry.
3154   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
3155 
3156   // If we just cloned a new assumption, add it the assumption cache.
3157   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3158     if (II->getIntrinsicID() == Intrinsic::assume)
3159       AC->registerAssumption(II);
3160 
3161   // End if-block.
3162   if (IfPredicateInstr)
3163     PredicatedInstructions.push_back(Cloned);
3164 }
3165 
3166 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3167                                                       Value *End, Value *Step,
3168                                                       Instruction *DL) {
3169   BasicBlock *Header = L->getHeader();
3170   BasicBlock *Latch = L->getLoopLatch();
3171   // As we're just creating this loop, it's possible no latch exists
3172   // yet. If so, use the header as this will be a single block loop.
3173   if (!Latch)
3174     Latch = Header;
3175 
3176   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3177   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3178   setDebugLocFromInst(Builder, OldInst);
3179   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3180 
3181   Builder.SetInsertPoint(Latch->getTerminator());
3182   setDebugLocFromInst(Builder, OldInst);
3183 
3184   // Create i+1 and fill the PHINode.
3185   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3186   Induction->addIncoming(Start, L->getLoopPreheader());
3187   Induction->addIncoming(Next, Latch);
3188   // Create the compare.
3189   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3190   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3191 
3192   // Now we have two terminators. Remove the old one from the block.
3193   Latch->getTerminator()->eraseFromParent();
3194 
3195   return Induction;
3196 }
3197 
3198 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3199   if (TripCount)
3200     return TripCount;
3201 
3202   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3203   // Find the loop boundaries.
3204   ScalarEvolution *SE = PSE.getSE();
3205   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3206   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3207          "Invalid loop count");
3208 
3209   Type *IdxTy = Legal->getWidestInductionType();
3210 
3211   // The exit count might have the type of i64 while the phi is i32. This can
3212   // happen if we have an induction variable that is sign extended before the
3213   // compare. The only way that we get a backedge taken count is that the
3214   // induction variable was signed and as such will not overflow. In such a case
3215   // truncation is legal.
3216   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3217       IdxTy->getPrimitiveSizeInBits())
3218     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3219   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3220 
3221   // Get the total trip count from the count by adding 1.
3222   const SCEV *ExitCount = SE->getAddExpr(
3223       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3224 
3225   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3226 
3227   // Expand the trip count and place the new instructions in the preheader.
3228   // Notice that the pre-header does not change, only the loop body.
3229   SCEVExpander Exp(*SE, DL, "induction");
3230 
3231   // Count holds the overall loop count (N).
3232   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3233                                 L->getLoopPreheader()->getTerminator());
3234 
3235   if (TripCount->getType()->isPointerTy())
3236     TripCount =
3237         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3238                                     L->getLoopPreheader()->getTerminator());
3239 
3240   return TripCount;
3241 }
3242 
3243 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3244   if (VectorTripCount)
3245     return VectorTripCount;
3246 
3247   Value *TC = getOrCreateTripCount(L);
3248   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3249 
3250   // Now we need to generate the expression for the part of the loop that the
3251   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3252   // iterations are not required for correctness, or N - Step, otherwise. Step
3253   // is equal to the vectorization factor (number of SIMD elements) times the
3254   // unroll factor (number of SIMD instructions).
3255   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3256   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3257 
3258   // If there is a non-reversed interleaved group that may speculatively access
3259   // memory out-of-bounds, we need to ensure that there will be at least one
3260   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3261   // the trip count, we set the remainder to be equal to the step. If the step
3262   // does not evenly divide the trip count, no adjustment is necessary since
3263   // there will already be scalar iterations. Note that the minimum iterations
3264   // check ensures that N >= Step.
3265   if (VF > 1 && Legal->requiresScalarEpilogue()) {
3266     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3267     R = Builder.CreateSelect(IsZero, Step, R);
3268   }
3269 
3270   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3271 
3272   return VectorTripCount;
3273 }
3274 
3275 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3276                                                    const DataLayout &DL) {
3277   // Verify that V is a vector type with same number of elements as DstVTy.
3278   unsigned VF = DstVTy->getNumElements();
3279   VectorType *SrcVecTy = cast<VectorType>(V->getType());
3280   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3281   Type *SrcElemTy = SrcVecTy->getElementType();
3282   Type *DstElemTy = DstVTy->getElementType();
3283   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3284          "Vector elements must have same size");
3285 
3286   // Do a direct cast if element types are castable.
3287   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3288     return Builder.CreateBitOrPointerCast(V, DstVTy);
3289   }
3290   // V cannot be directly casted to desired vector type.
3291   // May happen when V is a floating point vector but DstVTy is a vector of
3292   // pointers or vice-versa. Handle this using a two-step bitcast using an
3293   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3294   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3295          "Only one type should be a pointer type");
3296   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3297          "Only one type should be a floating point type");
3298   Type *IntTy =
3299       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3300   VectorType *VecIntTy = VectorType::get(IntTy, VF);
3301   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3302   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3303 }
3304 
3305 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3306                                                          BasicBlock *Bypass) {
3307   Value *Count = getOrCreateTripCount(L);
3308   BasicBlock *BB = L->getLoopPreheader();
3309   IRBuilder<> Builder(BB->getTerminator());
3310 
3311   // Generate code to check if the loop's trip count is less than VF * UF, or
3312   // equal to it in case a scalar epilogue is required; this implies that the
3313   // vector trip count is zero. This check also covers the case where adding one
3314   // to the backedge-taken count overflowed leading to an incorrect trip count
3315   // of zero. In this case we will also jump to the scalar loop.
3316   auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3317                                            : ICmpInst::ICMP_ULT;
3318   Value *CheckMinIters = Builder.CreateICmp(
3319       P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3320 
3321   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3322   // Update dominator tree immediately if the generated block is a
3323   // LoopBypassBlock because SCEV expansions to generate loop bypass
3324   // checks may query it before the current function is finished.
3325   DT->addNewBlock(NewBB, BB);
3326   if (L->getParentLoop())
3327     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3328   ReplaceInstWithInst(BB->getTerminator(),
3329                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
3330   LoopBypassBlocks.push_back(BB);
3331 }
3332 
3333 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3334   BasicBlock *BB = L->getLoopPreheader();
3335 
3336   // Generate the code to check that the SCEV assumptions that we made.
3337   // We want the new basic block to start at the first instruction in a
3338   // sequence of instructions that form a check.
3339   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3340                    "scev.check");
3341   Value *SCEVCheck =
3342       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3343 
3344   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3345     if (C->isZero())
3346       return;
3347 
3348   // Create a new block containing the stride check.
3349   BB->setName("vector.scevcheck");
3350   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3351   // Update dominator tree immediately if the generated block is a
3352   // LoopBypassBlock because SCEV expansions to generate loop bypass
3353   // checks may query it before the current function is finished.
3354   DT->addNewBlock(NewBB, BB);
3355   if (L->getParentLoop())
3356     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3357   ReplaceInstWithInst(BB->getTerminator(),
3358                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3359   LoopBypassBlocks.push_back(BB);
3360   AddedSafetyChecks = true;
3361 }
3362 
3363 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3364   BasicBlock *BB = L->getLoopPreheader();
3365 
3366   // Generate the code that checks in runtime if arrays overlap. We put the
3367   // checks into a separate block to make the more common case of few elements
3368   // faster.
3369   Instruction *FirstCheckInst;
3370   Instruction *MemRuntimeCheck;
3371   std::tie(FirstCheckInst, MemRuntimeCheck) =
3372       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3373   if (!MemRuntimeCheck)
3374     return;
3375 
3376   // Create a new block containing the memory check.
3377   BB->setName("vector.memcheck");
3378   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3379   // Update dominator tree immediately if the generated block is a
3380   // LoopBypassBlock because SCEV expansions to generate loop bypass
3381   // checks may query it before the current function is finished.
3382   DT->addNewBlock(NewBB, BB);
3383   if (L->getParentLoop())
3384     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3385   ReplaceInstWithInst(BB->getTerminator(),
3386                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3387   LoopBypassBlocks.push_back(BB);
3388   AddedSafetyChecks = true;
3389 
3390   // We currently don't use LoopVersioning for the actual loop cloning but we
3391   // still use it to add the noalias metadata.
3392   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3393                                            PSE.getSE());
3394   LVer->prepareNoAliasMetadata();
3395 }
3396 
3397 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3398   /*
3399    In this function we generate a new loop. The new loop will contain
3400    the vectorized instructions while the old loop will continue to run the
3401    scalar remainder.
3402 
3403        [ ] <-- loop iteration number check.
3404     /   |
3405    /    v
3406   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3407   |  /  |
3408   | /   v
3409   ||   [ ]     <-- vector pre header.
3410   |/    |
3411   |     v
3412   |    [  ] \
3413   |    [  ]_|   <-- vector loop.
3414   |     |
3415   |     v
3416   |   -[ ]   <--- middle-block.
3417   |  /  |
3418   | /   v
3419   -|- >[ ]     <--- new preheader.
3420    |    |
3421    |    v
3422    |   [ ] \
3423    |   [ ]_|   <-- old scalar loop to handle remainder.
3424     \   |
3425      \  v
3426       >[ ]     <-- exit block.
3427    ...
3428    */
3429 
3430   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3431   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3432   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3433   assert(VectorPH && "Invalid loop structure");
3434   assert(ExitBlock && "Must have an exit block");
3435 
3436   // Some loops have a single integer induction variable, while other loops
3437   // don't. One example is c++ iterators that often have multiple pointer
3438   // induction variables. In the code below we also support a case where we
3439   // don't have a single induction variable.
3440   //
3441   // We try to obtain an induction variable from the original loop as hard
3442   // as possible. However if we don't find one that:
3443   //   - is an integer
3444   //   - counts from zero, stepping by one
3445   //   - is the size of the widest induction variable type
3446   // then we create a new one.
3447   OldInduction = Legal->getPrimaryInduction();
3448   Type *IdxTy = Legal->getWidestInductionType();
3449 
3450   // Split the single block loop into the two loop structure described above.
3451   BasicBlock *VecBody =
3452       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3453   BasicBlock *MiddleBlock =
3454       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3455   BasicBlock *ScalarPH =
3456       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3457 
3458   // Create and register the new vector loop.
3459   Loop *Lp = LI->AllocateLoop();
3460   Loop *ParentLoop = OrigLoop->getParentLoop();
3461 
3462   // Insert the new loop into the loop nest and register the new basic blocks
3463   // before calling any utilities such as SCEV that require valid LoopInfo.
3464   if (ParentLoop) {
3465     ParentLoop->addChildLoop(Lp);
3466     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3467     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3468   } else {
3469     LI->addTopLevelLoop(Lp);
3470   }
3471   Lp->addBasicBlockToLoop(VecBody, *LI);
3472 
3473   // Find the loop boundaries.
3474   Value *Count = getOrCreateTripCount(Lp);
3475 
3476   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3477 
3478   // Now, compare the new count to zero. If it is zero skip the vector loop and
3479   // jump to the scalar loop. This check also covers the case where the
3480   // backedge-taken count is uint##_max: adding one to it will overflow leading
3481   // to an incorrect trip count of zero. In this (rare) case we will also jump
3482   // to the scalar loop.
3483   emitMinimumIterationCountCheck(Lp, ScalarPH);
3484 
3485   // Generate the code to check any assumptions that we've made for SCEV
3486   // expressions.
3487   emitSCEVChecks(Lp, ScalarPH);
3488 
3489   // Generate the code that checks in runtime if arrays overlap. We put the
3490   // checks into a separate block to make the more common case of few elements
3491   // faster.
3492   emitMemRuntimeChecks(Lp, ScalarPH);
3493 
3494   // Generate the induction variable.
3495   // The loop step is equal to the vectorization factor (num of SIMD elements)
3496   // times the unroll factor (num of SIMD instructions).
3497   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3498   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3499   Induction =
3500       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3501                               getDebugLocFromInstOrOperands(OldInduction));
3502 
3503   // We are going to resume the execution of the scalar loop.
3504   // Go over all of the induction variables that we found and fix the
3505   // PHIs that are left in the scalar version of the loop.
3506   // The starting values of PHI nodes depend on the counter of the last
3507   // iteration in the vectorized loop.
3508   // If we come from a bypass edge then we need to start from the original
3509   // start value.
3510 
3511   // This variable saves the new starting index for the scalar loop. It is used
3512   // to test if there are any tail iterations left once the vector loop has
3513   // completed.
3514   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3515   for (auto &InductionEntry : *List) {
3516     PHINode *OrigPhi = InductionEntry.first;
3517     InductionDescriptor II = InductionEntry.second;
3518 
3519     // Create phi nodes to merge from the  backedge-taken check block.
3520     PHINode *BCResumeVal = PHINode::Create(
3521         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3522     Value *&EndValue = IVEndValues[OrigPhi];
3523     if (OrigPhi == OldInduction) {
3524       // We know what the end value is.
3525       EndValue = CountRoundDown;
3526     } else {
3527       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3528       Type *StepType = II.getStep()->getType();
3529       Instruction::CastOps CastOp =
3530         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3531       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3532       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3533       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3534       EndValue->setName("ind.end");
3535     }
3536 
3537     // The new PHI merges the original incoming value, in case of a bypass,
3538     // or the value at the end of the vectorized loop.
3539     BCResumeVal->addIncoming(EndValue, MiddleBlock);
3540 
3541     // Fix the scalar body counter (PHI node).
3542     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3543 
3544     // The old induction's phi node in the scalar body needs the truncated
3545     // value.
3546     for (BasicBlock *BB : LoopBypassBlocks)
3547       BCResumeVal->addIncoming(II.getStartValue(), BB);
3548     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3549   }
3550 
3551   // Add a check in the middle block to see if we have completed
3552   // all of the iterations in the first vector loop.
3553   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3554   Value *CmpN =
3555       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3556                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3557   ReplaceInstWithInst(MiddleBlock->getTerminator(),
3558                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3559 
3560   // Get ready to start creating new instructions into the vectorized body.
3561   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3562 
3563   // Save the state.
3564   LoopVectorPreHeader = Lp->getLoopPreheader();
3565   LoopScalarPreHeader = ScalarPH;
3566   LoopMiddleBlock = MiddleBlock;
3567   LoopExitBlock = ExitBlock;
3568   LoopVectorBody = VecBody;
3569   LoopScalarBody = OldBasicBlock;
3570 
3571   // Keep all loop hints from the original loop on the vector loop (we'll
3572   // replace the vectorizer-specific hints below).
3573   if (MDNode *LID = OrigLoop->getLoopID())
3574     Lp->setLoopID(LID);
3575 
3576   LoopVectorizeHints Hints(Lp, true, *ORE);
3577   Hints.setAlreadyVectorized();
3578 
3579   return LoopVectorPreHeader;
3580 }
3581 
3582 // Fix up external users of the induction variable. At this point, we are
3583 // in LCSSA form, with all external PHIs that use the IV having one input value,
3584 // coming from the remainder loop. We need those PHIs to also have a correct
3585 // value for the IV when arriving directly from the middle block.
3586 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3587                                        const InductionDescriptor &II,
3588                                        Value *CountRoundDown, Value *EndValue,
3589                                        BasicBlock *MiddleBlock) {
3590   // There are two kinds of external IV usages - those that use the value
3591   // computed in the last iteration (the PHI) and those that use the penultimate
3592   // value (the value that feeds into the phi from the loop latch).
3593   // We allow both, but they, obviously, have different values.
3594 
3595   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3596 
3597   DenseMap<Value *, Value *> MissingVals;
3598 
3599   // An external user of the last iteration's value should see the value that
3600   // the remainder loop uses to initialize its own IV.
3601   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3602   for (User *U : PostInc->users()) {
3603     Instruction *UI = cast<Instruction>(U);
3604     if (!OrigLoop->contains(UI)) {
3605       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3606       MissingVals[UI] = EndValue;
3607     }
3608   }
3609 
3610   // An external user of the penultimate value need to see EndValue - Step.
3611   // The simplest way to get this is to recompute it from the constituent SCEVs,
3612   // that is Start + (Step * (CRD - 1)).
3613   for (User *U : OrigPhi->users()) {
3614     auto *UI = cast<Instruction>(U);
3615     if (!OrigLoop->contains(UI)) {
3616       const DataLayout &DL =
3617           OrigLoop->getHeader()->getModule()->getDataLayout();
3618       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3619 
3620       IRBuilder<> B(MiddleBlock->getTerminator());
3621       Value *CountMinusOne = B.CreateSub(
3622           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3623       Value *CMO =
3624           !II.getStep()->getType()->isIntegerTy()
3625               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3626                              II.getStep()->getType())
3627               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3628       CMO->setName("cast.cmo");
3629       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3630       Escape->setName("ind.escape");
3631       MissingVals[UI] = Escape;
3632     }
3633   }
3634 
3635   for (auto &I : MissingVals) {
3636     PHINode *PHI = cast<PHINode>(I.first);
3637     // One corner case we have to handle is two IVs "chasing" each-other,
3638     // that is %IV2 = phi [...], [ %IV1, %latch ]
3639     // In this case, if IV1 has an external use, we need to avoid adding both
3640     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3641     // don't already have an incoming value for the middle block.
3642     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3643       PHI->addIncoming(I.second, MiddleBlock);
3644   }
3645 }
3646 
3647 namespace {
3648 struct CSEDenseMapInfo {
3649   static bool canHandle(const Instruction *I) {
3650     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3651            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3652   }
3653   static inline Instruction *getEmptyKey() {
3654     return DenseMapInfo<Instruction *>::getEmptyKey();
3655   }
3656   static inline Instruction *getTombstoneKey() {
3657     return DenseMapInfo<Instruction *>::getTombstoneKey();
3658   }
3659   static unsigned getHashValue(const Instruction *I) {
3660     assert(canHandle(I) && "Unknown instruction!");
3661     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3662                                                            I->value_op_end()));
3663   }
3664   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3665     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3666         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3667       return LHS == RHS;
3668     return LHS->isIdenticalTo(RHS);
3669   }
3670 };
3671 }
3672 
3673 ///\brief Perform cse of induction variable instructions.
3674 static void cse(BasicBlock *BB) {
3675   // Perform simple cse.
3676   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3677   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3678     Instruction *In = &*I++;
3679 
3680     if (!CSEDenseMapInfo::canHandle(In))
3681       continue;
3682 
3683     // Check if we can replace this instruction with any of the
3684     // visited instructions.
3685     if (Instruction *V = CSEMap.lookup(In)) {
3686       In->replaceAllUsesWith(V);
3687       In->eraseFromParent();
3688       continue;
3689     }
3690 
3691     CSEMap[In] = In;
3692   }
3693 }
3694 
3695 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3696 /// convenience wrapper for the type-based getScalarizationOverhead API.
3697 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3698                                          const TargetTransformInfo &TTI) {
3699   if (VF == 1)
3700     return 0;
3701 
3702   unsigned Cost = 0;
3703   Type *RetTy = ToVectorTy(I->getType(), VF);
3704   if (!RetTy->isVoidTy() &&
3705       (!isa<LoadInst>(I) ||
3706        !TTI.supportsEfficientVectorElementLoadStore()))
3707     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3708 
3709   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3710     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3711     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3712   }
3713   else if (!isa<StoreInst>(I) ||
3714            !TTI.supportsEfficientVectorElementLoadStore()) {
3715     SmallVector<const Value *, 4> Operands(I->operand_values());
3716     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3717   }
3718 
3719   return Cost;
3720 }
3721 
3722 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3723 // Return the cost of the instruction, including scalarization overhead if it's
3724 // needed. The flag NeedToScalarize shows if the call needs to be scalarized -
3725 // i.e. either vector version isn't available, or is too expensive.
3726 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3727                                   const TargetTransformInfo &TTI,
3728                                   const TargetLibraryInfo *TLI,
3729                                   bool &NeedToScalarize) {
3730   Function *F = CI->getCalledFunction();
3731   StringRef FnName = CI->getCalledFunction()->getName();
3732   Type *ScalarRetTy = CI->getType();
3733   SmallVector<Type *, 4> Tys, ScalarTys;
3734   for (auto &ArgOp : CI->arg_operands())
3735     ScalarTys.push_back(ArgOp->getType());
3736 
3737   // Estimate cost of scalarized vector call. The source operands are assumed
3738   // to be vectors, so we need to extract individual elements from there,
3739   // execute VF scalar calls, and then gather the result into the vector return
3740   // value.
3741   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3742   if (VF == 1)
3743     return ScalarCallCost;
3744 
3745   // Compute corresponding vector type for return value and arguments.
3746   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3747   for (Type *ScalarTy : ScalarTys)
3748     Tys.push_back(ToVectorTy(ScalarTy, VF));
3749 
3750   // Compute costs of unpacking argument values for the scalar calls and
3751   // packing the return values to a vector.
3752   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3753 
3754   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3755 
3756   // If we can't emit a vector call for this function, then the currently found
3757   // cost is the cost we need to return.
3758   NeedToScalarize = true;
3759   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3760     return Cost;
3761 
3762   // If the corresponding vector cost is cheaper, return its cost.
3763   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3764   if (VectorCallCost < Cost) {
3765     NeedToScalarize = false;
3766     return VectorCallCost;
3767   }
3768   return Cost;
3769 }
3770 
3771 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3772 // factor VF.  Return the cost of the instruction, including scalarization
3773 // overhead if it's needed.
3774 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3775                                        const TargetTransformInfo &TTI,
3776                                        const TargetLibraryInfo *TLI) {
3777   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3778   assert(ID && "Expected intrinsic call!");
3779 
3780   FastMathFlags FMF;
3781   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3782     FMF = FPMO->getFastMathFlags();
3783 
3784   SmallVector<Value *, 4> Operands(CI->arg_operands());
3785   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3786 }
3787 
3788 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3789   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3790   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3791   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3792 }
3793 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3794   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3795   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3796   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3797 }
3798 
3799 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3800   // For every instruction `I` in MinBWs, truncate the operands, create a
3801   // truncated version of `I` and reextend its result. InstCombine runs
3802   // later and will remove any ext/trunc pairs.
3803   //
3804   SmallPtrSet<Value *, 4> Erased;
3805   for (const auto &KV : Cost->getMinimalBitwidths()) {
3806     // If the value wasn't vectorized, we must maintain the original scalar
3807     // type. The absence of the value from VectorLoopValueMap indicates that it
3808     // wasn't vectorized.
3809     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3810       continue;
3811     for (unsigned Part = 0; Part < UF; ++Part) {
3812       Value *I = getOrCreateVectorValue(KV.first, Part);
3813       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3814         continue;
3815       Type *OriginalTy = I->getType();
3816       Type *ScalarTruncatedTy =
3817           IntegerType::get(OriginalTy->getContext(), KV.second);
3818       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3819                                           OriginalTy->getVectorNumElements());
3820       if (TruncatedTy == OriginalTy)
3821         continue;
3822 
3823       IRBuilder<> B(cast<Instruction>(I));
3824       auto ShrinkOperand = [&](Value *V) -> Value * {
3825         if (auto *ZI = dyn_cast<ZExtInst>(V))
3826           if (ZI->getSrcTy() == TruncatedTy)
3827             return ZI->getOperand(0);
3828         return B.CreateZExtOrTrunc(V, TruncatedTy);
3829       };
3830 
3831       // The actual instruction modification depends on the instruction type,
3832       // unfortunately.
3833       Value *NewI = nullptr;
3834       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3835         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3836                              ShrinkOperand(BO->getOperand(1)));
3837 
3838         // Any wrapping introduced by shrinking this operation shouldn't be
3839         // considered undefined behavior. So, we can't unconditionally copy
3840         // arithmetic wrapping flags to NewI.
3841         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3842       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3843         NewI =
3844             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3845                          ShrinkOperand(CI->getOperand(1)));
3846       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3847         NewI = B.CreateSelect(SI->getCondition(),
3848                               ShrinkOperand(SI->getTrueValue()),
3849                               ShrinkOperand(SI->getFalseValue()));
3850       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3851         switch (CI->getOpcode()) {
3852         default:
3853           llvm_unreachable("Unhandled cast!");
3854         case Instruction::Trunc:
3855           NewI = ShrinkOperand(CI->getOperand(0));
3856           break;
3857         case Instruction::SExt:
3858           NewI = B.CreateSExtOrTrunc(
3859               CI->getOperand(0),
3860               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3861           break;
3862         case Instruction::ZExt:
3863           NewI = B.CreateZExtOrTrunc(
3864               CI->getOperand(0),
3865               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3866           break;
3867         }
3868       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3869         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3870         auto *O0 = B.CreateZExtOrTrunc(
3871             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3872         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3873         auto *O1 = B.CreateZExtOrTrunc(
3874             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3875 
3876         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3877       } else if (isa<LoadInst>(I)) {
3878         // Don't do anything with the operands, just extend the result.
3879         continue;
3880       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3881         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3882         auto *O0 = B.CreateZExtOrTrunc(
3883             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3884         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3885         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3886       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3887         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3888         auto *O0 = B.CreateZExtOrTrunc(
3889             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3890         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3891       } else {
3892         llvm_unreachable("Unhandled instruction type!");
3893       }
3894 
3895       // Lastly, extend the result.
3896       NewI->takeName(cast<Instruction>(I));
3897       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3898       I->replaceAllUsesWith(Res);
3899       cast<Instruction>(I)->eraseFromParent();
3900       Erased.insert(I);
3901       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3902     }
3903   }
3904 
3905   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3906   for (const auto &KV : Cost->getMinimalBitwidths()) {
3907     // If the value wasn't vectorized, we must maintain the original scalar
3908     // type. The absence of the value from VectorLoopValueMap indicates that it
3909     // wasn't vectorized.
3910     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3911       continue;
3912     for (unsigned Part = 0; Part < UF; ++Part) {
3913       Value *I = getOrCreateVectorValue(KV.first, Part);
3914       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3915       if (Inst && Inst->use_empty()) {
3916         Value *NewI = Inst->getOperand(0);
3917         Inst->eraseFromParent();
3918         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3919       }
3920     }
3921   }
3922 }
3923 
3924 void InnerLoopVectorizer::fixVectorizedLoop() {
3925   // Insert truncates and extends for any truncated instructions as hints to
3926   // InstCombine.
3927   if (VF > 1)
3928     truncateToMinimalBitwidths();
3929 
3930   // At this point every instruction in the original loop is widened to a
3931   // vector form. Now we need to fix the recurrences in the loop. These PHI
3932   // nodes are currently empty because we did not want to introduce cycles.
3933   // This is the second stage of vectorizing recurrences.
3934   fixCrossIterationPHIs();
3935 
3936   // Update the dominator tree.
3937   //
3938   // FIXME: After creating the structure of the new loop, the dominator tree is
3939   //        no longer up-to-date, and it remains that way until we update it
3940   //        here. An out-of-date dominator tree is problematic for SCEV,
3941   //        because SCEVExpander uses it to guide code generation. The
3942   //        vectorizer use SCEVExpanders in several places. Instead, we should
3943   //        keep the dominator tree up-to-date as we go.
3944   updateAnalysis();
3945 
3946   // Fix-up external users of the induction variables.
3947   for (auto &Entry : *Legal->getInductionVars())
3948     fixupIVUsers(Entry.first, Entry.second,
3949                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3950                  IVEndValues[Entry.first], LoopMiddleBlock);
3951 
3952   fixLCSSAPHIs();
3953   for (Instruction *PI : PredicatedInstructions)
3954     sinkScalarOperands(&*PI);
3955 
3956   // Remove redundant induction instructions.
3957   cse(LoopVectorBody);
3958 }
3959 
3960 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3961   // In order to support recurrences we need to be able to vectorize Phi nodes.
3962   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3963   // stage #2: We now need to fix the recurrences by adding incoming edges to
3964   // the currently empty PHI nodes. At this point every instruction in the
3965   // original loop is widened to a vector form so we can use them to construct
3966   // the incoming edges.
3967   for (Instruction &I : *OrigLoop->getHeader()) {
3968     PHINode *Phi = dyn_cast<PHINode>(&I);
3969     if (!Phi)
3970       break;
3971     // Handle first-order recurrences and reductions that need to be fixed.
3972     if (Legal->isFirstOrderRecurrence(Phi))
3973       fixFirstOrderRecurrence(Phi);
3974     else if (Legal->isReductionVariable(Phi))
3975       fixReduction(Phi);
3976   }
3977 }
3978 
3979 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3980 
3981   // This is the second phase of vectorizing first-order recurrences. An
3982   // overview of the transformation is described below. Suppose we have the
3983   // following loop.
3984   //
3985   //   for (int i = 0; i < n; ++i)
3986   //     b[i] = a[i] - a[i - 1];
3987   //
3988   // There is a first-order recurrence on "a". For this loop, the shorthand
3989   // scalar IR looks like:
3990   //
3991   //   scalar.ph:
3992   //     s_init = a[-1]
3993   //     br scalar.body
3994   //
3995   //   scalar.body:
3996   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3997   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3998   //     s2 = a[i]
3999   //     b[i] = s2 - s1
4000   //     br cond, scalar.body, ...
4001   //
4002   // In this example, s1 is a recurrence because it's value depends on the
4003   // previous iteration. In the first phase of vectorization, we created a
4004   // temporary value for s1. We now complete the vectorization and produce the
4005   // shorthand vector IR shown below (for VF = 4, UF = 1).
4006   //
4007   //   vector.ph:
4008   //     v_init = vector(..., ..., ..., a[-1])
4009   //     br vector.body
4010   //
4011   //   vector.body
4012   //     i = phi [0, vector.ph], [i+4, vector.body]
4013   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4014   //     v2 = a[i, i+1, i+2, i+3];
4015   //     v3 = vector(v1(3), v2(0, 1, 2))
4016   //     b[i, i+1, i+2, i+3] = v2 - v3
4017   //     br cond, vector.body, middle.block
4018   //
4019   //   middle.block:
4020   //     x = v2(3)
4021   //     br scalar.ph
4022   //
4023   //   scalar.ph:
4024   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4025   //     br scalar.body
4026   //
4027   // After execution completes the vector loop, we extract the next value of
4028   // the recurrence (x) to use as the initial value in the scalar loop.
4029 
4030   // Get the original loop preheader and single loop latch.
4031   auto *Preheader = OrigLoop->getLoopPreheader();
4032   auto *Latch = OrigLoop->getLoopLatch();
4033 
4034   // Get the initial and previous values of the scalar recurrence.
4035   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4036   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4037 
4038   // Create a vector from the initial value.
4039   auto *VectorInit = ScalarInit;
4040   if (VF > 1) {
4041     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4042     VectorInit = Builder.CreateInsertElement(
4043         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4044         Builder.getInt32(VF - 1), "vector.recur.init");
4045   }
4046 
4047   // We constructed a temporary phi node in the first phase of vectorization.
4048   // This phi node will eventually be deleted.
4049   Builder.SetInsertPoint(
4050       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4051 
4052   // Create a phi node for the new recurrence. The current value will either be
4053   // the initial value inserted into a vector or loop-varying vector value.
4054   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4055   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4056 
4057   // Get the vectorized previous value of the last part UF - 1. It appears last
4058   // among all unrolled iterations, due to the order of their construction.
4059   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
4060 
4061   // Set the insertion point after the previous value if it is an instruction.
4062   // Note that the previous value may have been constant-folded so it is not
4063   // guaranteed to be an instruction in the vector loop. Also, if the previous
4064   // value is a phi node, we should insert after all the phi nodes to avoid
4065   // breaking basic block verification.
4066   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
4067       isa<PHINode>(PreviousLastPart))
4068     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4069   else
4070     Builder.SetInsertPoint(
4071         &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
4072 
4073   // We will construct a vector for the recurrence by combining the values for
4074   // the current and previous iterations. This is the required shuffle mask.
4075   SmallVector<Constant *, 8> ShuffleMask(VF);
4076   ShuffleMask[0] = Builder.getInt32(VF - 1);
4077   for (unsigned I = 1; I < VF; ++I)
4078     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4079 
4080   // The vector from which to take the initial value for the current iteration
4081   // (actual or unrolled). Initially, this is the vector phi node.
4082   Value *Incoming = VecPhi;
4083 
4084   // Shuffle the current and previous vector and update the vector parts.
4085   for (unsigned Part = 0; Part < UF; ++Part) {
4086     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
4087     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
4088     auto *Shuffle =
4089         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
4090                                              ConstantVector::get(ShuffleMask))
4091                : Incoming;
4092     PhiPart->replaceAllUsesWith(Shuffle);
4093     cast<Instruction>(PhiPart)->eraseFromParent();
4094     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
4095     Incoming = PreviousPart;
4096   }
4097 
4098   // Fix the latch value of the new recurrence in the vector loop.
4099   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4100 
4101   // Extract the last vector element in the middle block. This will be the
4102   // initial value for the recurrence when jumping to the scalar loop.
4103   auto *ExtractForScalar = Incoming;
4104   if (VF > 1) {
4105     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4106     ExtractForScalar = Builder.CreateExtractElement(
4107         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
4108   }
4109   // Extract the second last element in the middle block if the
4110   // Phi is used outside the loop. We need to extract the phi itself
4111   // and not the last element (the phi update in the current iteration). This
4112   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4113   // when the scalar loop is not run at all.
4114   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4115   if (VF > 1)
4116     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4117         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
4118   // When loop is unrolled without vectorizing, initialize
4119   // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
4120   // `Incoming`. This is analogous to the vectorized case above: extracting the
4121   // second last element when VF > 1.
4122   else if (UF > 1)
4123     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4124 
4125   // Fix the initial value of the original recurrence in the scalar loop.
4126   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4127   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4128   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4129     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4130     Start->addIncoming(Incoming, BB);
4131   }
4132 
4133   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4134   Phi->setName("scalar.recur");
4135 
4136   // Finally, fix users of the recurrence outside the loop. The users will need
4137   // either the last value of the scalar recurrence or the last value of the
4138   // vector recurrence we extracted in the middle block. Since the loop is in
4139   // LCSSA form, we just need to find the phi node for the original scalar
4140   // recurrence in the exit block, and then add an edge for the middle block.
4141   for (auto &I : *LoopExitBlock) {
4142     auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4143     if (!LCSSAPhi)
4144       break;
4145     if (LCSSAPhi->getIncomingValue(0) == Phi) {
4146       LCSSAPhi->addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4147       break;
4148     }
4149   }
4150 }
4151 
4152 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4153   Constant *Zero = Builder.getInt32(0);
4154 
4155   // Get it's reduction variable descriptor.
4156   assert(Legal->isReductionVariable(Phi) &&
4157          "Unable to find the reduction variable");
4158   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4159 
4160   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4161   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4162   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4163   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4164     RdxDesc.getMinMaxRecurrenceKind();
4165   setDebugLocFromInst(Builder, ReductionStartValue);
4166 
4167   // We need to generate a reduction vector from the incoming scalar.
4168   // To do so, we need to generate the 'identity' vector and override
4169   // one of the elements with the incoming scalar reduction. We need
4170   // to do it in the vector-loop preheader.
4171   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4172 
4173   // This is the vector-clone of the value that leaves the loop.
4174   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4175 
4176   // Find the reduction identity variable. Zero for addition, or, xor,
4177   // one for multiplication, -1 for And.
4178   Value *Identity;
4179   Value *VectorStart;
4180   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4181       RK == RecurrenceDescriptor::RK_FloatMinMax) {
4182     // MinMax reduction have the start value as their identify.
4183     if (VF == 1) {
4184       VectorStart = Identity = ReductionStartValue;
4185     } else {
4186       VectorStart = Identity =
4187         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4188     }
4189   } else {
4190     // Handle other reduction kinds:
4191     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4192         RK, VecTy->getScalarType());
4193     if (VF == 1) {
4194       Identity = Iden;
4195       // This vector is the Identity vector where the first element is the
4196       // incoming scalar reduction.
4197       VectorStart = ReductionStartValue;
4198     } else {
4199       Identity = ConstantVector::getSplat(VF, Iden);
4200 
4201       // This vector is the Identity vector where the first element is the
4202       // incoming scalar reduction.
4203       VectorStart =
4204         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4205     }
4206   }
4207 
4208   // Fix the vector-loop phi.
4209 
4210   // Reductions do not have to start at zero. They can start with
4211   // any loop invariant values.
4212   BasicBlock *Latch = OrigLoop->getLoopLatch();
4213   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4214   for (unsigned Part = 0; Part < UF; ++Part) {
4215     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4216     Value *Val = getOrCreateVectorValue(LoopVal, Part);
4217     // Make sure to add the reduction stat value only to the
4218     // first unroll part.
4219     Value *StartVal = (Part == 0) ? VectorStart : Identity;
4220     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
4221     cast<PHINode>(VecRdxPhi)
4222       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4223   }
4224 
4225   // Before each round, move the insertion point right between
4226   // the PHIs and the values we are going to write.
4227   // This allows us to write both PHINodes and the extractelement
4228   // instructions.
4229   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4230 
4231   setDebugLocFromInst(Builder, LoopExitInst);
4232 
4233   // If the vector reduction can be performed in a smaller type, we truncate
4234   // then extend the loop exit value to enable InstCombine to evaluate the
4235   // entire expression in the smaller type.
4236   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
4237     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4238     Builder.SetInsertPoint(
4239         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4240     VectorParts RdxParts(UF);
4241     for (unsigned Part = 0; Part < UF; ++Part) {
4242       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4243       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4244       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4245                                         : Builder.CreateZExt(Trunc, VecTy);
4246       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4247            UI != RdxParts[Part]->user_end();)
4248         if (*UI != Trunc) {
4249           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4250           RdxParts[Part] = Extnd;
4251         } else {
4252           ++UI;
4253         }
4254     }
4255     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4256     for (unsigned Part = 0; Part < UF; ++Part) {
4257       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4258       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
4259     }
4260   }
4261 
4262   // Reduce all of the unrolled parts into a single vector.
4263   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
4264   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4265   setDebugLocFromInst(Builder, ReducedPartRdx);
4266   for (unsigned Part = 1; Part < UF; ++Part) {
4267     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4268     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4269       // Floating point operations had to be 'fast' to enable the reduction.
4270       ReducedPartRdx = addFastMathFlag(
4271           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
4272                               ReducedPartRdx, "bin.rdx"));
4273     else
4274       ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
4275           Builder, MinMaxKind, ReducedPartRdx, RdxPart);
4276   }
4277 
4278   if (VF > 1) {
4279     bool NoNaN = Legal->hasFunNoNaNAttr();
4280     ReducedPartRdx =
4281         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
4282     // If the reduction can be performed in a smaller type, we need to extend
4283     // the reduction to the wider type before we branch to the original loop.
4284     if (Phi->getType() != RdxDesc.getRecurrenceType())
4285       ReducedPartRdx =
4286         RdxDesc.isSigned()
4287         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4288         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4289   }
4290 
4291   // Create a phi node that merges control-flow from the backedge-taken check
4292   // block and the middle block.
4293   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4294                                         LoopScalarPreHeader->getTerminator());
4295   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4296     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4297   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4298 
4299   // Now, we need to fix the users of the reduction variable
4300   // inside and outside of the scalar remainder loop.
4301   // We know that the loop is in LCSSA form. We need to update the
4302   // PHI nodes in the exit blocks.
4303   for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4304          LEE = LoopExitBlock->end();
4305        LEI != LEE; ++LEI) {
4306     PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4307     if (!LCSSAPhi)
4308       break;
4309 
4310     // All PHINodes need to have a single entry edge, or two if
4311     // we already fixed them.
4312     assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4313 
4314     // We found a reduction value exit-PHI. Update it with the
4315     // incoming bypass edge.
4316     if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
4317       LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4318   } // end of the LCSSA phi scan.
4319 
4320     // Fix the scalar loop reduction variable with the incoming reduction sum
4321     // from the vector body and from the backedge value.
4322   int IncomingEdgeBlockIdx =
4323     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4324   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4325   // Pick the other block.
4326   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4327   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4328   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4329 }
4330 
4331 void InnerLoopVectorizer::fixLCSSAPHIs() {
4332   for (Instruction &LEI : *LoopExitBlock) {
4333     auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
4334     if (!LCSSAPhi)
4335       break;
4336     if (LCSSAPhi->getNumIncomingValues() == 1) {
4337       assert(OrigLoop->isLoopInvariant(LCSSAPhi->getIncomingValue(0)) &&
4338              "Incoming value isn't loop invariant");
4339       LCSSAPhi->addIncoming(LCSSAPhi->getIncomingValue(0), LoopMiddleBlock);
4340     }
4341   }
4342 }
4343 
4344 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4345 
4346   // The basic block and loop containing the predicated instruction.
4347   auto *PredBB = PredInst->getParent();
4348   auto *VectorLoop = LI->getLoopFor(PredBB);
4349 
4350   // Initialize a worklist with the operands of the predicated instruction.
4351   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4352 
4353   // Holds instructions that we need to analyze again. An instruction may be
4354   // reanalyzed if we don't yet know if we can sink it or not.
4355   SmallVector<Instruction *, 8> InstsToReanalyze;
4356 
4357   // Returns true if a given use occurs in the predicated block. Phi nodes use
4358   // their operands in their corresponding predecessor blocks.
4359   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4360     auto *I = cast<Instruction>(U.getUser());
4361     BasicBlock *BB = I->getParent();
4362     if (auto *Phi = dyn_cast<PHINode>(I))
4363       BB = Phi->getIncomingBlock(
4364           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4365     return BB == PredBB;
4366   };
4367 
4368   // Iteratively sink the scalarized operands of the predicated instruction
4369   // into the block we created for it. When an instruction is sunk, it's
4370   // operands are then added to the worklist. The algorithm ends after one pass
4371   // through the worklist doesn't sink a single instruction.
4372   bool Changed;
4373   do {
4374 
4375     // Add the instructions that need to be reanalyzed to the worklist, and
4376     // reset the changed indicator.
4377     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4378     InstsToReanalyze.clear();
4379     Changed = false;
4380 
4381     while (!Worklist.empty()) {
4382       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4383 
4384       // We can't sink an instruction if it is a phi node, is already in the
4385       // predicated block, is not in the loop, or may have side effects.
4386       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4387           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4388         continue;
4389 
4390       // It's legal to sink the instruction if all its uses occur in the
4391       // predicated block. Otherwise, there's nothing to do yet, and we may
4392       // need to reanalyze the instruction.
4393       if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4394         InstsToReanalyze.push_back(I);
4395         continue;
4396       }
4397 
4398       // Move the instruction to the beginning of the predicated block, and add
4399       // it's operands to the worklist.
4400       I->moveBefore(&*PredBB->getFirstInsertionPt());
4401       Worklist.insert(I->op_begin(), I->op_end());
4402 
4403       // The sinking may have enabled other instructions to be sunk, so we will
4404       // need to iterate.
4405       Changed = true;
4406     }
4407   } while (Changed);
4408 }
4409 
4410 InnerLoopVectorizer::VectorParts
4411 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4412   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
4413 
4414   // Look for cached value.
4415   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4416   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
4417   if (ECEntryIt != EdgeMaskCache.end())
4418     return ECEntryIt->second;
4419 
4420   VectorParts SrcMask = createBlockInMask(Src);
4421 
4422   // The terminator has to be a branch inst!
4423   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4424   assert(BI && "Unexpected terminator found");
4425 
4426   if (!BI->isConditional())
4427     return EdgeMaskCache[Edge] = SrcMask;
4428 
4429   VectorParts EdgeMask(UF);
4430   for (unsigned Part = 0; Part < UF; ++Part) {
4431     auto *EdgeMaskPart = getOrCreateVectorValue(BI->getCondition(), Part);
4432     if (BI->getSuccessor(0) != Dst)
4433       EdgeMaskPart = Builder.CreateNot(EdgeMaskPart);
4434 
4435     if (SrcMask[Part]) // Otherwise block in-mask is all-one, no need to AND.
4436       EdgeMaskPart = Builder.CreateAnd(EdgeMaskPart, SrcMask[Part]);
4437 
4438     EdgeMask[Part] = EdgeMaskPart;
4439   }
4440 
4441   return EdgeMaskCache[Edge] = EdgeMask;
4442 }
4443 
4444 InnerLoopVectorizer::VectorParts
4445 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4446   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4447 
4448   // Look for cached value.
4449   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
4450   if (BCEntryIt != BlockMaskCache.end())
4451     return BCEntryIt->second;
4452 
4453   // All-one mask is modelled as no-mask following the convention for masked
4454   // load/store/gather/scatter. Initialize BlockMask to no-mask.
4455   VectorParts BlockMask(UF);
4456   for (unsigned Part = 0; Part < UF; ++Part)
4457     BlockMask[Part] = nullptr;
4458 
4459   // Loop incoming mask is all-one.
4460   if (OrigLoop->getHeader() == BB)
4461     return BlockMaskCache[BB] = BlockMask;
4462 
4463   // This is the block mask. We OR all incoming edges.
4464   for (auto *Predecessor : predecessors(BB)) {
4465     VectorParts EdgeMask = createEdgeMask(Predecessor, BB);
4466     if (!EdgeMask[0]) // Mask of predecessor is all-one so mask of block is too.
4467       return BlockMaskCache[BB] = EdgeMask;
4468 
4469     if (!BlockMask[0]) { // BlockMask has its initialized nullptr value.
4470       BlockMask = EdgeMask;
4471       continue;
4472     }
4473 
4474     for (unsigned Part = 0; Part < UF; ++Part)
4475       BlockMask[Part] = Builder.CreateOr(BlockMask[Part], EdgeMask[Part]);
4476   }
4477 
4478   return BlockMaskCache[BB] = BlockMask;
4479 }
4480 
4481 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4482                                               unsigned VF) {
4483   PHINode *P = cast<PHINode>(PN);
4484   // In order to support recurrences we need to be able to vectorize Phi nodes.
4485   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4486   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4487   // this value when we vectorize all of the instructions that use the PHI.
4488   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4489     for (unsigned Part = 0; Part < UF; ++Part) {
4490       // This is phase one of vectorizing PHIs.
4491       Type *VecTy =
4492           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4493       Value *EntryPart = PHINode::Create(
4494           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4495       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4496     }
4497     return;
4498   }
4499 
4500   setDebugLocFromInst(Builder, P);
4501   // Check for PHI nodes that are lowered to vector selects.
4502   if (P->getParent() != OrigLoop->getHeader()) {
4503     // We know that all PHIs in non-header blocks are converted into
4504     // selects, so we don't have to worry about the insertion order and we
4505     // can just use the builder.
4506     // At this point we generate the predication tree. There may be
4507     // duplications since this is a simple recursive scan, but future
4508     // optimizations will clean it up.
4509 
4510     unsigned NumIncoming = P->getNumIncomingValues();
4511 
4512     // Generate a sequence of selects of the form:
4513     // SELECT(Mask3, In3,
4514     //      SELECT(Mask2, In2,
4515     //                   ( ...)))
4516     VectorParts Entry(UF);
4517     for (unsigned In = 0; In < NumIncoming; In++) {
4518       VectorParts Cond =
4519           createEdgeMask(P->getIncomingBlock(In), P->getParent());
4520 
4521       for (unsigned Part = 0; Part < UF; ++Part) {
4522         Value *In0 = getOrCreateVectorValue(P->getIncomingValue(In), Part);
4523         assert((Cond[Part] || NumIncoming == 1) &&
4524                "Multiple predecessors with one predecessor having a full mask");
4525         if (In == 0)
4526           Entry[Part] = In0; // Initialize with the first incoming value.
4527         else
4528           // Select between the current value and the previous incoming edge
4529           // based on the incoming mask.
4530           Entry[Part] = Builder.CreateSelect(Cond[Part], In0, Entry[Part],
4531                                              "predphi");
4532       }
4533     }
4534     for (unsigned Part = 0; Part < UF; ++Part)
4535       VectorLoopValueMap.setVectorValue(P, Part, Entry[Part]);
4536     return;
4537   }
4538 
4539   // This PHINode must be an induction variable.
4540   // Make sure that we know about it.
4541   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4542 
4543   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4544   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4545 
4546   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4547   // which can be found from the original scalar operations.
4548   switch (II.getKind()) {
4549   case InductionDescriptor::IK_NoInduction:
4550     llvm_unreachable("Unknown induction");
4551   case InductionDescriptor::IK_IntInduction:
4552   case InductionDescriptor::IK_FpInduction:
4553     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4554   case InductionDescriptor::IK_PtrInduction: {
4555     // Handle the pointer induction variable case.
4556     assert(P->getType()->isPointerTy() && "Unexpected type.");
4557     // This is the normalized GEP that starts counting at zero.
4558     Value *PtrInd = Induction;
4559     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4560     // Determine the number of scalars we need to generate for each unroll
4561     // iteration. If the instruction is uniform, we only need to generate the
4562     // first lane. Otherwise, we generate all VF values.
4563     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4564     // These are the scalar results. Notice that we don't generate vector GEPs
4565     // because scalar GEPs result in better code.
4566     for (unsigned Part = 0; Part < UF; ++Part) {
4567       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4568         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4569         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4570         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4571         SclrGep->setName("next.gep");
4572         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4573       }
4574     }
4575     return;
4576   }
4577   }
4578 }
4579 
4580 /// A helper function for checking whether an integer division-related
4581 /// instruction may divide by zero (in which case it must be predicated if
4582 /// executed conditionally in the scalar code).
4583 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4584 /// Non-zero divisors that are non compile-time constants will not be
4585 /// converted into multiplication, so we will still end up scalarizing
4586 /// the division, but can do so w/o predication.
4587 static bool mayDivideByZero(Instruction &I) {
4588   assert((I.getOpcode() == Instruction::UDiv ||
4589           I.getOpcode() == Instruction::SDiv ||
4590           I.getOpcode() == Instruction::URem ||
4591           I.getOpcode() == Instruction::SRem) &&
4592          "Unexpected instruction");
4593   Value *Divisor = I.getOperand(1);
4594   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4595   return !CInt || CInt->isZero();
4596 }
4597 
4598 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
4599   switch (I.getOpcode()) {
4600   case Instruction::Br:
4601   case Instruction::PHI:
4602     llvm_unreachable("This instruction is handled by a different recipe.");
4603   case Instruction::GetElementPtr: {
4604     // Construct a vector GEP by widening the operands of the scalar GEP as
4605     // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4606     // results in a vector of pointers when at least one operand of the GEP
4607     // is vector-typed. Thus, to keep the representation compact, we only use
4608     // vector-typed operands for loop-varying values.
4609     auto *GEP = cast<GetElementPtrInst>(&I);
4610 
4611     if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
4612       // If we are vectorizing, but the GEP has only loop-invariant operands,
4613       // the GEP we build (by only using vector-typed operands for
4614       // loop-varying values) would be a scalar pointer. Thus, to ensure we
4615       // produce a vector of pointers, we need to either arbitrarily pick an
4616       // operand to broadcast, or broadcast a clone of the original GEP.
4617       // Here, we broadcast a clone of the original.
4618       //
4619       // TODO: If at some point we decide to scalarize instructions having
4620       //       loop-invariant operands, this special case will no longer be
4621       //       required. We would add the scalarization decision to
4622       //       collectLoopScalars() and teach getVectorValue() to broadcast
4623       //       the lane-zero scalar value.
4624       auto *Clone = Builder.Insert(GEP->clone());
4625       for (unsigned Part = 0; Part < UF; ++Part) {
4626         Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4627         VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
4628         addMetadata(EntryPart, GEP);
4629       }
4630     } else {
4631       // If the GEP has at least one loop-varying operand, we are sure to
4632       // produce a vector of pointers. But if we are only unrolling, we want
4633       // to produce a scalar GEP for each unroll part. Thus, the GEP we
4634       // produce with the code below will be scalar (if VF == 1) or vector
4635       // (otherwise). Note that for the unroll-only case, we still maintain
4636       // values in the vector mapping with initVector, as we do for other
4637       // instructions.
4638       for (unsigned Part = 0; Part < UF; ++Part) {
4639 
4640         // The pointer operand of the new GEP. If it's loop-invariant, we
4641         // won't broadcast it.
4642         auto *Ptr =
4643             OrigLoop->isLoopInvariant(GEP->getPointerOperand())
4644                 ? GEP->getPointerOperand()
4645                 : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4646 
4647         // Collect all the indices for the new GEP. If any index is
4648         // loop-invariant, we won't broadcast it.
4649         SmallVector<Value *, 4> Indices;
4650         for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
4651           if (OrigLoop->isLoopInvariant(U.get()))
4652             Indices.push_back(U.get());
4653           else
4654             Indices.push_back(getOrCreateVectorValue(U.get(), Part));
4655         }
4656 
4657         // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4658         // but it should be a vector, otherwise.
4659         auto *NewGEP = GEP->isInBounds()
4660                            ? Builder.CreateInBoundsGEP(Ptr, Indices)
4661                            : Builder.CreateGEP(Ptr, Indices);
4662         assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4663                "NewGEP is not a pointer vector");
4664         VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
4665         addMetadata(NewGEP, GEP);
4666       }
4667     }
4668 
4669     break;
4670   }
4671   case Instruction::UDiv:
4672   case Instruction::SDiv:
4673   case Instruction::SRem:
4674   case Instruction::URem:
4675   case Instruction::Add:
4676   case Instruction::FAdd:
4677   case Instruction::Sub:
4678   case Instruction::FSub:
4679   case Instruction::Mul:
4680   case Instruction::FMul:
4681   case Instruction::FDiv:
4682   case Instruction::FRem:
4683   case Instruction::Shl:
4684   case Instruction::LShr:
4685   case Instruction::AShr:
4686   case Instruction::And:
4687   case Instruction::Or:
4688   case Instruction::Xor: {
4689     // Just widen binops.
4690     auto *BinOp = cast<BinaryOperator>(&I);
4691     setDebugLocFromInst(Builder, BinOp);
4692 
4693     for (unsigned Part = 0; Part < UF; ++Part) {
4694       Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
4695       Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
4696       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
4697 
4698       if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4699         VecOp->copyIRFlags(BinOp);
4700 
4701       // Use this vector value for all users of the original instruction.
4702       VectorLoopValueMap.setVectorValue(&I, Part, V);
4703       addMetadata(V, BinOp);
4704     }
4705 
4706     break;
4707   }
4708   case Instruction::Select: {
4709     // Widen selects.
4710     // If the selector is loop invariant we can create a select
4711     // instruction with a scalar condition. Otherwise, use vector-select.
4712     auto *SE = PSE.getSE();
4713     bool InvariantCond =
4714         SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4715     setDebugLocFromInst(Builder, &I);
4716 
4717     // The condition can be loop invariant  but still defined inside the
4718     // loop. This means that we can't just use the original 'cond' value.
4719     // We have to take the 'vectorized' value and pick the first lane.
4720     // Instcombine will make this a no-op.
4721 
4722     auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4723 
4724     for (unsigned Part = 0; Part < UF; ++Part) {
4725       Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4726       Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4727       Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4728       Value *Sel =
4729           Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4730       VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4731       addMetadata(Sel, &I);
4732     }
4733 
4734     break;
4735   }
4736 
4737   case Instruction::ICmp:
4738   case Instruction::FCmp: {
4739     // Widen compares. Generate vector compares.
4740     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4741     auto *Cmp = dyn_cast<CmpInst>(&I);
4742     setDebugLocFromInst(Builder, Cmp);
4743     for (unsigned Part = 0; Part < UF; ++Part) {
4744       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4745       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4746       Value *C = nullptr;
4747       if (FCmp) {
4748         // Propagate fast math flags.
4749         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4750         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4751         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4752       } else {
4753         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4754       }
4755       VectorLoopValueMap.setVectorValue(&I, Part, C);
4756       addMetadata(C, &I);
4757     }
4758 
4759     break;
4760   }
4761 
4762   case Instruction::Store:
4763   case Instruction::Load:
4764     vectorizeMemoryInstruction(&I);
4765     break;
4766   case Instruction::ZExt:
4767   case Instruction::SExt:
4768   case Instruction::FPToUI:
4769   case Instruction::FPToSI:
4770   case Instruction::FPExt:
4771   case Instruction::PtrToInt:
4772   case Instruction::IntToPtr:
4773   case Instruction::SIToFP:
4774   case Instruction::UIToFP:
4775   case Instruction::Trunc:
4776   case Instruction::FPTrunc:
4777   case Instruction::BitCast: {
4778     auto *CI = dyn_cast<CastInst>(&I);
4779     setDebugLocFromInst(Builder, CI);
4780 
4781     /// Vectorize casts.
4782     Type *DestTy =
4783         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4784 
4785     for (unsigned Part = 0; Part < UF; ++Part) {
4786       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4787       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4788       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4789       addMetadata(Cast, &I);
4790     }
4791     break;
4792   }
4793 
4794   case Instruction::Call: {
4795     // Ignore dbg intrinsics.
4796     if (isa<DbgInfoIntrinsic>(I))
4797       break;
4798     setDebugLocFromInst(Builder, &I);
4799 
4800     Module *M = I.getParent()->getParent()->getParent();
4801     auto *CI = cast<CallInst>(&I);
4802 
4803     StringRef FnName = CI->getCalledFunction()->getName();
4804     Function *F = CI->getCalledFunction();
4805     Type *RetTy = ToVectorTy(CI->getType(), VF);
4806     SmallVector<Type *, 4> Tys;
4807     for (Value *ArgOperand : CI->arg_operands())
4808       Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4809 
4810     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4811 
4812     // The flag shows whether we use Intrinsic or a usual Call for vectorized
4813     // version of the instruction.
4814     // Is it beneficial to perform intrinsic call compared to lib call?
4815     bool NeedToScalarize;
4816     unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4817     bool UseVectorIntrinsic =
4818         ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4819     assert((UseVectorIntrinsic || !NeedToScalarize) &&
4820            "Instruction should be scalarized elsewhere.");
4821 
4822     for (unsigned Part = 0; Part < UF; ++Part) {
4823       SmallVector<Value *, 4> Args;
4824       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4825         Value *Arg = CI->getArgOperand(i);
4826         // Some intrinsics have a scalar argument - don't replace it with a
4827         // vector.
4828         if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
4829           Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
4830         Args.push_back(Arg);
4831       }
4832 
4833       Function *VectorF;
4834       if (UseVectorIntrinsic) {
4835         // Use vector version of the intrinsic.
4836         Type *TysForDecl[] = {CI->getType()};
4837         if (VF > 1)
4838           TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4839         VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4840       } else {
4841         // Use vector version of the library call.
4842         StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4843         assert(!VFnName.empty() && "Vector function name is empty.");
4844         VectorF = M->getFunction(VFnName);
4845         if (!VectorF) {
4846           // Generate a declaration
4847           FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4848           VectorF =
4849               Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4850           VectorF->copyAttributesFrom(F);
4851         }
4852       }
4853       assert(VectorF && "Can't create vector function.");
4854 
4855       SmallVector<OperandBundleDef, 1> OpBundles;
4856       CI->getOperandBundlesAsDefs(OpBundles);
4857       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4858 
4859       if (isa<FPMathOperator>(V))
4860         V->copyFastMathFlags(CI);
4861 
4862       VectorLoopValueMap.setVectorValue(&I, Part, V);
4863       addMetadata(V, &I);
4864     }
4865 
4866     break;
4867   }
4868 
4869   default:
4870     // All other instructions are scalarized.
4871     DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4872     llvm_unreachable("Unhandled instruction!");
4873   } // end of switch.
4874 }
4875 
4876 void InnerLoopVectorizer::updateAnalysis() {
4877   // Forget the original basic block.
4878   PSE.getSE()->forgetLoop(OrigLoop);
4879 
4880   // Update the dominator tree information.
4881   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4882          "Entry does not dominate exit.");
4883 
4884   DT->addNewBlock(LoopMiddleBlock,
4885                   LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4886   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4887   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4888   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4889   DEBUG(DT->verifyDomTree());
4890 }
4891 
4892 /// \brief Check whether it is safe to if-convert this phi node.
4893 ///
4894 /// Phi nodes with constant expressions that can trap are not safe to if
4895 /// convert.
4896 static bool canIfConvertPHINodes(BasicBlock *BB) {
4897   for (Instruction &I : *BB) {
4898     auto *Phi = dyn_cast<PHINode>(&I);
4899     if (!Phi)
4900       return true;
4901     for (Value *V : Phi->incoming_values())
4902       if (auto *C = dyn_cast<Constant>(V))
4903         if (C->canTrap())
4904           return false;
4905   }
4906   return true;
4907 }
4908 
4909 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4910   if (!EnableIfConversion) {
4911     ORE->emit(createMissedAnalysis("IfConversionDisabled")
4912               << "if-conversion is disabled");
4913     return false;
4914   }
4915 
4916   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4917 
4918   // A list of pointers that we can safely read and write to.
4919   SmallPtrSet<Value *, 8> SafePointes;
4920 
4921   // Collect safe addresses.
4922   for (BasicBlock *BB : TheLoop->blocks()) {
4923     if (blockNeedsPredication(BB))
4924       continue;
4925 
4926     for (Instruction &I : *BB)
4927       if (auto *Ptr = getPointerOperand(&I))
4928         SafePointes.insert(Ptr);
4929   }
4930 
4931   // Collect the blocks that need predication.
4932   BasicBlock *Header = TheLoop->getHeader();
4933   for (BasicBlock *BB : TheLoop->blocks()) {
4934     // We don't support switch statements inside loops.
4935     if (!isa<BranchInst>(BB->getTerminator())) {
4936       ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
4937                 << "loop contains a switch statement");
4938       return false;
4939     }
4940 
4941     // We must be able to predicate all blocks that need to be predicated.
4942     if (blockNeedsPredication(BB)) {
4943       if (!blockCanBePredicated(BB, SafePointes)) {
4944         ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
4945                   << "control flow cannot be substituted for a select");
4946         return false;
4947       }
4948     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
4949       ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
4950                 << "control flow cannot be substituted for a select");
4951       return false;
4952     }
4953   }
4954 
4955   // We can if-convert this loop.
4956   return true;
4957 }
4958 
4959 bool LoopVectorizationLegality::canVectorize() {
4960   // Store the result and return it at the end instead of exiting early, in case
4961   // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
4962   bool Result = true;
4963 
4964   bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
4965   if (DoExtraAnalysis)
4966   // We must have a loop in canonical form. Loops with indirectbr in them cannot
4967   // be canonicalized.
4968   if (!TheLoop->getLoopPreheader()) {
4969     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4970               << "loop control flow is not understood by vectorizer");
4971   if (DoExtraAnalysis)
4972       Result = false;
4973     else
4974       return false;
4975   }
4976 
4977   // FIXME: The code is currently dead, since the loop gets sent to
4978   // LoopVectorizationLegality is already an innermost loop.
4979   //
4980   // We can only vectorize innermost loops.
4981   if (!TheLoop->empty()) {
4982     ORE->emit(createMissedAnalysis("NotInnermostLoop")
4983               << "loop is not the innermost loop");
4984     if (DoExtraAnalysis)
4985       Result = false;
4986     else
4987       return false;
4988   }
4989 
4990   // We must have a single backedge.
4991   if (TheLoop->getNumBackEdges() != 1) {
4992     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4993               << "loop control flow is not understood by vectorizer");
4994     if (DoExtraAnalysis)
4995       Result = false;
4996     else
4997       return false;
4998   }
4999 
5000   // We must have a single exiting block.
5001   if (!TheLoop->getExitingBlock()) {
5002     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5003               << "loop control flow is not understood by vectorizer");
5004     if (DoExtraAnalysis)
5005       Result = false;
5006     else
5007       return false;
5008   }
5009 
5010   // We only handle bottom-tested loops, i.e. loop in which the condition is
5011   // checked at the end of each iteration. With that we can assume that all
5012   // instructions in the loop are executed the same number of times.
5013   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5014     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5015               << "loop control flow is not understood by vectorizer");
5016     if (DoExtraAnalysis)
5017       Result = false;
5018     else
5019       return false;
5020   }
5021 
5022   // We need to have a loop header.
5023   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5024                << '\n');
5025 
5026   // Check if we can if-convert non-single-bb loops.
5027   unsigned NumBlocks = TheLoop->getNumBlocks();
5028   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5029     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5030     if (DoExtraAnalysis)
5031       Result = false;
5032     else
5033       return false;
5034   }
5035 
5036   // Check if we can vectorize the instructions and CFG in this loop.
5037   if (!canVectorizeInstrs()) {
5038     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5039     if (DoExtraAnalysis)
5040       Result = false;
5041     else
5042       return false;
5043   }
5044 
5045   // Go over each instruction and look at memory deps.
5046   if (!canVectorizeMemory()) {
5047     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5048     if (DoExtraAnalysis)
5049       Result = false;
5050     else
5051       return false;
5052   }
5053 
5054   DEBUG(dbgs() << "LV: We can vectorize this loop"
5055                << (LAI->getRuntimePointerChecking()->Need
5056                        ? " (with a runtime bound check)"
5057                        : "")
5058                << "!\n");
5059 
5060   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5061 
5062   // If an override option has been passed in for interleaved accesses, use it.
5063   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5064     UseInterleaved = EnableInterleavedMemAccesses;
5065 
5066   // Analyze interleaved memory accesses.
5067   if (UseInterleaved)
5068     InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5069 
5070   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5071   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5072     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5073 
5074   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5075     ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5076               << "Too many SCEV assumptions need to be made and checked "
5077               << "at runtime");
5078     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5079     if (DoExtraAnalysis)
5080       Result = false;
5081     else
5082       return false;
5083   }
5084 
5085   // Okay! We've done all the tests. If any have failed, return false. Otherwise
5086   // we can vectorize, and at this point we don't have any other mem analysis
5087   // which may limit our maximum vectorization factor, so just return true with
5088   // no restrictions.
5089   return Result;
5090 }
5091 
5092 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5093   if (Ty->isPointerTy())
5094     return DL.getIntPtrType(Ty);
5095 
5096   // It is possible that char's or short's overflow when we ask for the loop's
5097   // trip count, work around this by changing the type size.
5098   if (Ty->getScalarSizeInBits() < 32)
5099     return Type::getInt32Ty(Ty->getContext());
5100 
5101   return Ty;
5102 }
5103 
5104 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5105   Ty0 = convertPointerToIntegerType(DL, Ty0);
5106   Ty1 = convertPointerToIntegerType(DL, Ty1);
5107   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5108     return Ty0;
5109   return Ty1;
5110 }
5111 
5112 /// \brief Check that the instruction has outside loop users and is not an
5113 /// identified reduction variable.
5114 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5115                                SmallPtrSetImpl<Value *> &AllowedExit) {
5116   // Reduction and Induction instructions are allowed to have exit users. All
5117   // other instructions must not have external users.
5118   if (!AllowedExit.count(Inst))
5119     // Check that all of the users of the loop are inside the BB.
5120     for (User *U : Inst->users()) {
5121       Instruction *UI = cast<Instruction>(U);
5122       // This user may be a reduction exit value.
5123       if (!TheLoop->contains(UI)) {
5124         DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
5125         return true;
5126       }
5127     }
5128   return false;
5129 }
5130 
5131 void LoopVectorizationLegality::addInductionPhi(
5132     PHINode *Phi, const InductionDescriptor &ID,
5133     SmallPtrSetImpl<Value *> &AllowedExit) {
5134   Inductions[Phi] = ID;
5135   Type *PhiTy = Phi->getType();
5136   const DataLayout &DL = Phi->getModule()->getDataLayout();
5137 
5138   // Get the widest type.
5139   if (!PhiTy->isFloatingPointTy()) {
5140     if (!WidestIndTy)
5141       WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5142     else
5143       WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5144   }
5145 
5146   // Int inductions are special because we only allow one IV.
5147   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5148       ID.getConstIntStepValue() &&
5149       ID.getConstIntStepValue()->isOne() &&
5150       isa<Constant>(ID.getStartValue()) &&
5151       cast<Constant>(ID.getStartValue())->isNullValue()) {
5152 
5153     // Use the phi node with the widest type as induction. Use the last
5154     // one if there are multiple (no good reason for doing this other
5155     // than it is expedient). We've checked that it begins at zero and
5156     // steps by one, so this is a canonical induction variable.
5157     if (!PrimaryInduction || PhiTy == WidestIndTy)
5158       PrimaryInduction = Phi;
5159   }
5160 
5161   // Both the PHI node itself, and the "post-increment" value feeding
5162   // back into the PHI node may have external users.
5163   // We can allow those uses, except if the SCEVs we have for them rely
5164   // on predicates that only hold within the loop, since allowing the exit
5165   // currently means re-using this SCEV outside the loop.
5166   if (PSE.getUnionPredicate().isAlwaysTrue()) {
5167     AllowedExit.insert(Phi);
5168     AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5169   }
5170 
5171   DEBUG(dbgs() << "LV: Found an induction variable.\n");
5172   return;
5173 }
5174 
5175 bool LoopVectorizationLegality::canVectorizeInstrs() {
5176   BasicBlock *Header = TheLoop->getHeader();
5177 
5178   // Look for the attribute signaling the absence of NaNs.
5179   Function &F = *Header->getParent();
5180   HasFunNoNaNAttr =
5181       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5182 
5183   // For each block in the loop.
5184   for (BasicBlock *BB : TheLoop->blocks()) {
5185     // Scan the instructions in the block and look for hazards.
5186     for (Instruction &I : *BB) {
5187       if (auto *Phi = dyn_cast<PHINode>(&I)) {
5188         Type *PhiTy = Phi->getType();
5189         // Check that this PHI type is allowed.
5190         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5191             !PhiTy->isPointerTy()) {
5192           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5193                     << "loop control flow is not understood by vectorizer");
5194           DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
5195           return false;
5196         }
5197 
5198         // If this PHINode is not in the header block, then we know that we
5199         // can convert it to select during if-conversion. No need to check if
5200         // the PHIs in this block are induction or reduction variables.
5201         if (BB != Header) {
5202           // Check that this instruction has no outside users or is an
5203           // identified reduction value with an outside user.
5204           if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5205             continue;
5206           ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5207                     << "value could not be identified as "
5208                        "an induction or reduction variable");
5209           return false;
5210         }
5211 
5212         // We only allow if-converted PHIs with exactly two incoming values.
5213         if (Phi->getNumIncomingValues() != 2) {
5214           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5215                     << "control flow not understood by vectorizer");
5216           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5217           return false;
5218         }
5219 
5220         RecurrenceDescriptor RedDes;
5221         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5222           if (RedDes.hasUnsafeAlgebra())
5223             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5224           AllowedExit.insert(RedDes.getLoopExitInstr());
5225           Reductions[Phi] = RedDes;
5226           continue;
5227         }
5228 
5229         InductionDescriptor ID;
5230         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5231           addInductionPhi(Phi, ID, AllowedExit);
5232           if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5233             Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5234           continue;
5235         }
5236 
5237         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
5238                                                          SinkAfter, DT)) {
5239           FirstOrderRecurrences.insert(Phi);
5240           continue;
5241         }
5242 
5243         // As a last resort, coerce the PHI to a AddRec expression
5244         // and re-try classifying it a an induction PHI.
5245         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5246           addInductionPhi(Phi, ID, AllowedExit);
5247           continue;
5248         }
5249 
5250         ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5251                   << "value that could not be identified as "
5252                      "reduction is used outside the loop");
5253         DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5254         return false;
5255       } // end of PHI handling
5256 
5257       // We handle calls that:
5258       //   * Are debug info intrinsics.
5259       //   * Have a mapping to an IR intrinsic.
5260       //   * Have a vector version available.
5261       auto *CI = dyn_cast<CallInst>(&I);
5262       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5263           !isa<DbgInfoIntrinsic>(CI) &&
5264           !(CI->getCalledFunction() && TLI &&
5265             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5266         ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5267                   << "call instruction cannot be vectorized");
5268         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5269         return false;
5270       }
5271 
5272       // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the
5273       // second argument is the same (i.e. loop invariant)
5274       if (CI && hasVectorInstrinsicScalarOpd(
5275                     getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5276         auto *SE = PSE.getSE();
5277         if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5278           ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5279                     << "intrinsic instruction cannot be vectorized");
5280           DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5281           return false;
5282         }
5283       }
5284 
5285       // Check that the instruction return type is vectorizable.
5286       // Also, we can't vectorize extractelement instructions.
5287       if ((!VectorType::isValidElementType(I.getType()) &&
5288            !I.getType()->isVoidTy()) ||
5289           isa<ExtractElementInst>(I)) {
5290         ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5291                   << "instruction return type cannot be vectorized");
5292         DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5293         return false;
5294       }
5295 
5296       // Check that the stored type is vectorizable.
5297       if (auto *ST = dyn_cast<StoreInst>(&I)) {
5298         Type *T = ST->getValueOperand()->getType();
5299         if (!VectorType::isValidElementType(T)) {
5300           ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5301                     << "store instruction cannot be vectorized");
5302           return false;
5303         }
5304 
5305         // FP instructions can allow unsafe algebra, thus vectorizable by
5306         // non-IEEE-754 compliant SIMD units.
5307         // This applies to floating-point math operations and calls, not memory
5308         // operations, shuffles, or casts, as they don't change precision or
5309         // semantics.
5310       } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5311                  !I.hasUnsafeAlgebra()) {
5312         DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
5313         Hints->setPotentiallyUnsafe();
5314       }
5315 
5316       // Reduction instructions are allowed to have exit users.
5317       // All other instructions must not have external users.
5318       if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5319         ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5320                   << "value cannot be used outside the loop");
5321         return false;
5322       }
5323 
5324     } // next instr.
5325   }
5326 
5327   if (!PrimaryInduction) {
5328     DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
5329     if (Inductions.empty()) {
5330       ORE->emit(createMissedAnalysis("NoInductionVariable")
5331                 << "loop induction variable could not be identified");
5332       return false;
5333     }
5334   }
5335 
5336   // Now we know the widest induction type, check if our found induction
5337   // is the same size. If it's not, unset it here and InnerLoopVectorizer
5338   // will create another.
5339   if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
5340     PrimaryInduction = nullptr;
5341 
5342   return true;
5343 }
5344 
5345 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
5346 
5347   // We should not collect Scalars more than once per VF. Right now, this
5348   // function is called from collectUniformsAndScalars(), which already does
5349   // this check. Collecting Scalars for VF=1 does not make any sense.
5350   assert(VF >= 2 && !Scalars.count(VF) &&
5351          "This function should not be visited twice for the same VF");
5352 
5353   SmallSetVector<Instruction *, 8> Worklist;
5354 
5355   // These sets are used to seed the analysis with pointers used by memory
5356   // accesses that will remain scalar.
5357   SmallSetVector<Instruction *, 8> ScalarPtrs;
5358   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5359 
5360   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5361   // The pointer operands of loads and stores will be scalar as long as the
5362   // memory access is not a gather or scatter operation. The value operand of a
5363   // store will remain scalar if the store is scalarized.
5364   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5365     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5366     assert(WideningDecision != CM_Unknown &&
5367            "Widening decision should be ready at this moment");
5368     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5369       if (Ptr == Store->getValueOperand())
5370         return WideningDecision == CM_Scalarize;
5371     assert(Ptr == getPointerOperand(MemAccess) &&
5372            "Ptr is neither a value or pointer operand");
5373     return WideningDecision != CM_GatherScatter;
5374   };
5375 
5376   // A helper that returns true if the given value is a bitcast or
5377   // getelementptr instruction contained in the loop.
5378   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5379     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5380             isa<GetElementPtrInst>(V)) &&
5381            !TheLoop->isLoopInvariant(V);
5382   };
5383 
5384   // A helper that evaluates a memory access's use of a pointer. If the use
5385   // will be a scalar use, and the pointer is only used by memory accesses, we
5386   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
5387   // PossibleNonScalarPtrs.
5388   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5389 
5390     // We only care about bitcast and getelementptr instructions contained in
5391     // the loop.
5392     if (!isLoopVaryingBitCastOrGEP(Ptr))
5393       return;
5394 
5395     // If the pointer has already been identified as scalar (e.g., if it was
5396     // also identified as uniform), there's nothing to do.
5397     auto *I = cast<Instruction>(Ptr);
5398     if (Worklist.count(I))
5399       return;
5400 
5401     // If the use of the pointer will be a scalar use, and all users of the
5402     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5403     // place the pointer in PossibleNonScalarPtrs.
5404     if (isScalarUse(MemAccess, Ptr) && all_of(I->users(), [&](User *U) {
5405           return isa<LoadInst>(U) || isa<StoreInst>(U);
5406         }))
5407       ScalarPtrs.insert(I);
5408     else
5409       PossibleNonScalarPtrs.insert(I);
5410   };
5411 
5412   // We seed the scalars analysis with three classes of instructions: (1)
5413   // instructions marked uniform-after-vectorization, (2) bitcast and
5414   // getelementptr instructions used by memory accesses requiring a scalar use,
5415   // and (3) pointer induction variables and their update instructions (we
5416   // currently only scalarize these).
5417   //
5418   // (1) Add to the worklist all instructions that have been identified as
5419   // uniform-after-vectorization.
5420   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5421 
5422   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5423   // memory accesses requiring a scalar use. The pointer operands of loads and
5424   // stores will be scalar as long as the memory accesses is not a gather or
5425   // scatter operation. The value operand of a store will remain scalar if the
5426   // store is scalarized.
5427   for (auto *BB : TheLoop->blocks())
5428     for (auto &I : *BB) {
5429       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5430         evaluatePtrUse(Load, Load->getPointerOperand());
5431       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5432         evaluatePtrUse(Store, Store->getPointerOperand());
5433         evaluatePtrUse(Store, Store->getValueOperand());
5434       }
5435     }
5436   for (auto *I : ScalarPtrs)
5437     if (!PossibleNonScalarPtrs.count(I)) {
5438       DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5439       Worklist.insert(I);
5440     }
5441 
5442   // (3) Add to the worklist all pointer induction variables and their update
5443   // instructions.
5444   //
5445   // TODO: Once we are able to vectorize pointer induction variables we should
5446   //       no longer insert them into the worklist here.
5447   auto *Latch = TheLoop->getLoopLatch();
5448   for (auto &Induction : *Legal->getInductionVars()) {
5449     auto *Ind = Induction.first;
5450     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5451     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
5452       continue;
5453     Worklist.insert(Ind);
5454     Worklist.insert(IndUpdate);
5455     DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5456     DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5457   }
5458 
5459   // Insert the forced scalars.
5460   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5461   // induction variable when the PHI user is scalarized.
5462   if (ForcedScalars.count(VF))
5463     for (auto *I : ForcedScalars.find(VF)->second)
5464       Worklist.insert(I);
5465 
5466   // Expand the worklist by looking through any bitcasts and getelementptr
5467   // instructions we've already identified as scalar. This is similar to the
5468   // expansion step in collectLoopUniforms(); however, here we're only
5469   // expanding to include additional bitcasts and getelementptr instructions.
5470   unsigned Idx = 0;
5471   while (Idx != Worklist.size()) {
5472     Instruction *Dst = Worklist[Idx++];
5473     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5474       continue;
5475     auto *Src = cast<Instruction>(Dst->getOperand(0));
5476     if (all_of(Src->users(), [&](User *U) -> bool {
5477           auto *J = cast<Instruction>(U);
5478           return !TheLoop->contains(J) || Worklist.count(J) ||
5479                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5480                   isScalarUse(J, Src));
5481         })) {
5482       Worklist.insert(Src);
5483       DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5484     }
5485   }
5486 
5487   // An induction variable will remain scalar if all users of the induction
5488   // variable and induction variable update remain scalar.
5489   for (auto &Induction : *Legal->getInductionVars()) {
5490     auto *Ind = Induction.first;
5491     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5492 
5493     // We already considered pointer induction variables, so there's no reason
5494     // to look at their users again.
5495     //
5496     // TODO: Once we are able to vectorize pointer induction variables we
5497     //       should no longer skip over them here.
5498     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
5499       continue;
5500 
5501     // Determine if all users of the induction variable are scalar after
5502     // vectorization.
5503     auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5504       auto *I = cast<Instruction>(U);
5505       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5506     });
5507     if (!ScalarInd)
5508       continue;
5509 
5510     // Determine if all users of the induction variable update instruction are
5511     // scalar after vectorization.
5512     auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5513       auto *I = cast<Instruction>(U);
5514       return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5515     });
5516     if (!ScalarIndUpdate)
5517       continue;
5518 
5519     // The induction variable and its update instruction will remain scalar.
5520     Worklist.insert(Ind);
5521     Worklist.insert(IndUpdate);
5522     DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5523     DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5524   }
5525 
5526   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5527 }
5528 
5529 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5530   if (!blockNeedsPredication(I->getParent()))
5531     return false;
5532   switch(I->getOpcode()) {
5533   default:
5534     break;
5535   case Instruction::Store:
5536     return !isMaskRequired(I);
5537   case Instruction::UDiv:
5538   case Instruction::SDiv:
5539   case Instruction::SRem:
5540   case Instruction::URem:
5541     return mayDivideByZero(*I);
5542   }
5543   return false;
5544 }
5545 
5546 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5547                                                               unsigned VF) {
5548   // Get and ensure we have a valid memory instruction.
5549   LoadInst *LI = dyn_cast<LoadInst>(I);
5550   StoreInst *SI = dyn_cast<StoreInst>(I);
5551   assert((LI || SI) && "Invalid memory instruction");
5552 
5553   auto *Ptr = getPointerOperand(I);
5554 
5555   // In order to be widened, the pointer should be consecutive, first of all.
5556   if (!isConsecutivePtr(Ptr))
5557     return false;
5558 
5559   // If the instruction is a store located in a predicated block, it will be
5560   // scalarized.
5561   if (isScalarWithPredication(I))
5562     return false;
5563 
5564   // If the instruction's allocated size doesn't equal it's type size, it
5565   // requires padding and will be scalarized.
5566   auto &DL = I->getModule()->getDataLayout();
5567   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5568   if (hasIrregularType(ScalarTy, DL, VF))
5569     return false;
5570 
5571   return true;
5572 }
5573 
5574 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
5575 
5576   // We should not collect Uniforms more than once per VF. Right now,
5577   // this function is called from collectUniformsAndScalars(), which
5578   // already does this check. Collecting Uniforms for VF=1 does not make any
5579   // sense.
5580 
5581   assert(VF >= 2 && !Uniforms.count(VF) &&
5582          "This function should not be visited twice for the same VF");
5583 
5584   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
5585   // not analyze again.  Uniforms.count(VF) will return 1.
5586   Uniforms[VF].clear();
5587 
5588   // We now know that the loop is vectorizable!
5589   // Collect instructions inside the loop that will remain uniform after
5590   // vectorization.
5591 
5592   // Global values, params and instructions outside of current loop are out of
5593   // scope.
5594   auto isOutOfScope = [&](Value *V) -> bool {
5595     Instruction *I = dyn_cast<Instruction>(V);
5596     return (!I || !TheLoop->contains(I));
5597   };
5598 
5599   SetVector<Instruction *> Worklist;
5600   BasicBlock *Latch = TheLoop->getLoopLatch();
5601 
5602   // Start with the conditional branch. If the branch condition is an
5603   // instruction contained in the loop that is only used by the branch, it is
5604   // uniform.
5605   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5606   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5607     Worklist.insert(Cmp);
5608     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5609   }
5610 
5611   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5612   // are pointers that are treated like consecutive pointers during
5613   // vectorization. The pointer operands of interleaved accesses are an
5614   // example.
5615   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5616 
5617   // Holds pointer operands of instructions that are possibly non-uniform.
5618   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5619 
5620   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5621     InstWidening WideningDecision = getWideningDecision(I, VF);
5622     assert(WideningDecision != CM_Unknown &&
5623            "Widening decision should be ready at this moment");
5624 
5625     return (WideningDecision == CM_Widen ||
5626             WideningDecision == CM_Interleave);
5627   };
5628   // Iterate over the instructions in the loop, and collect all
5629   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5630   // that a consecutive-like pointer operand will be scalarized, we collect it
5631   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5632   // getelementptr instruction can be used by both vectorized and scalarized
5633   // memory instructions. For example, if a loop loads and stores from the same
5634   // location, but the store is conditional, the store will be scalarized, and
5635   // the getelementptr won't remain uniform.
5636   for (auto *BB : TheLoop->blocks())
5637     for (auto &I : *BB) {
5638 
5639       // If there's no pointer operand, there's nothing to do.
5640       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5641       if (!Ptr)
5642         continue;
5643 
5644       // True if all users of Ptr are memory accesses that have Ptr as their
5645       // pointer operand.
5646       auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5647         return getPointerOperand(U) == Ptr;
5648       });
5649 
5650       // Ensure the memory instruction will not be scalarized or used by
5651       // gather/scatter, making its pointer operand non-uniform. If the pointer
5652       // operand is used by any instruction other than a memory access, we
5653       // conservatively assume the pointer operand may be non-uniform.
5654       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5655         PossibleNonUniformPtrs.insert(Ptr);
5656 
5657       // If the memory instruction will be vectorized and its pointer operand
5658       // is consecutive-like, or interleaving - the pointer operand should
5659       // remain uniform.
5660       else
5661         ConsecutiveLikePtrs.insert(Ptr);
5662     }
5663 
5664   // Add to the Worklist all consecutive and consecutive-like pointers that
5665   // aren't also identified as possibly non-uniform.
5666   for (auto *V : ConsecutiveLikePtrs)
5667     if (!PossibleNonUniformPtrs.count(V)) {
5668       DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5669       Worklist.insert(V);
5670     }
5671 
5672   // Expand Worklist in topological order: whenever a new instruction
5673   // is added , its users should be either already inside Worklist, or
5674   // out of scope. It ensures a uniform instruction will only be used
5675   // by uniform instructions or out of scope instructions.
5676   unsigned idx = 0;
5677   while (idx != Worklist.size()) {
5678     Instruction *I = Worklist[idx++];
5679 
5680     for (auto OV : I->operand_values()) {
5681       if (isOutOfScope(OV))
5682         continue;
5683       auto *OI = cast<Instruction>(OV);
5684       if (all_of(OI->users(), [&](User *U) -> bool {
5685             auto *J = cast<Instruction>(U);
5686             return !TheLoop->contains(J) || Worklist.count(J) ||
5687                    (OI == getPointerOperand(J) && isUniformDecision(J, VF));
5688           })) {
5689         Worklist.insert(OI);
5690         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5691       }
5692     }
5693   }
5694 
5695   // Returns true if Ptr is the pointer operand of a memory access instruction
5696   // I, and I is known to not require scalarization.
5697   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5698     return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5699   };
5700 
5701   // For an instruction to be added into Worklist above, all its users inside
5702   // the loop should also be in Worklist. However, this condition cannot be
5703   // true for phi nodes that form a cyclic dependence. We must process phi
5704   // nodes separately. An induction variable will remain uniform if all users
5705   // of the induction variable and induction variable update remain uniform.
5706   // The code below handles both pointer and non-pointer induction variables.
5707   for (auto &Induction : *Legal->getInductionVars()) {
5708     auto *Ind = Induction.first;
5709     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5710 
5711     // Determine if all users of the induction variable are uniform after
5712     // vectorization.
5713     auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5714       auto *I = cast<Instruction>(U);
5715       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5716              isVectorizedMemAccessUse(I, Ind);
5717     });
5718     if (!UniformInd)
5719       continue;
5720 
5721     // Determine if all users of the induction variable update instruction are
5722     // uniform after vectorization.
5723     auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5724       auto *I = cast<Instruction>(U);
5725       return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5726              isVectorizedMemAccessUse(I, IndUpdate);
5727     });
5728     if (!UniformIndUpdate)
5729       continue;
5730 
5731     // The induction variable and its update instruction will remain uniform.
5732     Worklist.insert(Ind);
5733     Worklist.insert(IndUpdate);
5734     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
5735     DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
5736   }
5737 
5738   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5739 }
5740 
5741 bool LoopVectorizationLegality::canVectorizeMemory() {
5742   LAI = &(*GetLAA)(*TheLoop);
5743   InterleaveInfo.setLAI(LAI);
5744   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
5745   if (LAR) {
5746     OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
5747                                   "loop not vectorized: ", *LAR);
5748     ORE->emit(VR);
5749   }
5750   if (!LAI->canVectorizeMemory())
5751     return false;
5752 
5753   if (LAI->hasStoreToLoopInvariantAddress()) {
5754     ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
5755               << "write to a loop invariant address could not be vectorized");
5756     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5757     return false;
5758   }
5759 
5760   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5761   PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5762 
5763   return true;
5764 }
5765 
5766 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5767   Value *In0 = const_cast<Value *>(V);
5768   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5769   if (!PN)
5770     return false;
5771 
5772   return Inductions.count(PN);
5773 }
5774 
5775 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5776   return FirstOrderRecurrences.count(Phi);
5777 }
5778 
5779 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5780   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5781 }
5782 
5783 bool LoopVectorizationLegality::blockCanBePredicated(
5784     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5785   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5786 
5787   for (Instruction &I : *BB) {
5788     // Check that we don't have a constant expression that can trap as operand.
5789     for (Value *Operand : I.operands()) {
5790       if (auto *C = dyn_cast<Constant>(Operand))
5791         if (C->canTrap())
5792           return false;
5793     }
5794     // We might be able to hoist the load.
5795     if (I.mayReadFromMemory()) {
5796       auto *LI = dyn_cast<LoadInst>(&I);
5797       if (!LI)
5798         return false;
5799       if (!SafePtrs.count(LI->getPointerOperand())) {
5800         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5801             isLegalMaskedGather(LI->getType())) {
5802           MaskedOp.insert(LI);
5803           continue;
5804         }
5805         // !llvm.mem.parallel_loop_access implies if-conversion safety.
5806         if (IsAnnotatedParallel)
5807           continue;
5808         return false;
5809       }
5810     }
5811 
5812     if (I.mayWriteToMemory()) {
5813       auto *SI = dyn_cast<StoreInst>(&I);
5814       // We only support predication of stores in basic blocks with one
5815       // predecessor.
5816       if (!SI)
5817         return false;
5818 
5819       // Build a masked store if it is legal for the target.
5820       if (isLegalMaskedStore(SI->getValueOperand()->getType(),
5821                              SI->getPointerOperand()) ||
5822           isLegalMaskedScatter(SI->getValueOperand()->getType())) {
5823         MaskedOp.insert(SI);
5824         continue;
5825       }
5826 
5827       bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
5828       bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
5829 
5830       if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
5831           !isSinglePredecessor)
5832         return false;
5833     }
5834     if (I.mayThrow())
5835       return false;
5836   }
5837 
5838   return true;
5839 }
5840 
5841 void InterleavedAccessInfo::collectConstStrideAccesses(
5842     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
5843     const ValueToValueMap &Strides) {
5844 
5845   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
5846 
5847   // Since it's desired that the load/store instructions be maintained in
5848   // "program order" for the interleaved access analysis, we have to visit the
5849   // blocks in the loop in reverse postorder (i.e., in a topological order).
5850   // Such an ordering will ensure that any load/store that may be executed
5851   // before a second load/store will precede the second load/store in
5852   // AccessStrideInfo.
5853   LoopBlocksDFS DFS(TheLoop);
5854   DFS.perform(LI);
5855   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
5856     for (auto &I : *BB) {
5857       auto *LI = dyn_cast<LoadInst>(&I);
5858       auto *SI = dyn_cast<StoreInst>(&I);
5859       if (!LI && !SI)
5860         continue;
5861 
5862       Value *Ptr = getPointerOperand(&I);
5863       // We don't check wrapping here because we don't know yet if Ptr will be
5864       // part of a full group or a group with gaps. Checking wrapping for all
5865       // pointers (even those that end up in groups with no gaps) will be overly
5866       // conservative. For full groups, wrapping should be ok since if we would
5867       // wrap around the address space we would do a memory access at nullptr
5868       // even without the transformation. The wrapping checks are therefore
5869       // deferred until after we've formed the interleaved groups.
5870       int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
5871                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
5872 
5873       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
5874       PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
5875       uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
5876 
5877       // An alignment of 0 means target ABI alignment.
5878       unsigned Align = getMemInstAlignment(&I);
5879       if (!Align)
5880         Align = DL.getABITypeAlignment(PtrTy->getElementType());
5881 
5882       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
5883     }
5884 }
5885 
5886 // Analyze interleaved accesses and collect them into interleaved load and
5887 // store groups.
5888 //
5889 // When generating code for an interleaved load group, we effectively hoist all
5890 // loads in the group to the location of the first load in program order. When
5891 // generating code for an interleaved store group, we sink all stores to the
5892 // location of the last store. This code motion can change the order of load
5893 // and store instructions and may break dependences.
5894 //
5895 // The code generation strategy mentioned above ensures that we won't violate
5896 // any write-after-read (WAR) dependences.
5897 //
5898 // E.g., for the WAR dependence:  a = A[i];      // (1)
5899 //                                A[i] = b;      // (2)
5900 //
5901 // The store group of (2) is always inserted at or below (2), and the load
5902 // group of (1) is always inserted at or above (1). Thus, the instructions will
5903 // never be reordered. All other dependences are checked to ensure the
5904 // correctness of the instruction reordering.
5905 //
5906 // The algorithm visits all memory accesses in the loop in bottom-up program
5907 // order. Program order is established by traversing the blocks in the loop in
5908 // reverse postorder when collecting the accesses.
5909 //
5910 // We visit the memory accesses in bottom-up order because it can simplify the
5911 // construction of store groups in the presence of write-after-write (WAW)
5912 // dependences.
5913 //
5914 // E.g., for the WAW dependence:  A[i] = a;      // (1)
5915 //                                A[i] = b;      // (2)
5916 //                                A[i + 1] = c;  // (3)
5917 //
5918 // We will first create a store group with (3) and (2). (1) can't be added to
5919 // this group because it and (2) are dependent. However, (1) can be grouped
5920 // with other accesses that may precede it in program order. Note that a
5921 // bottom-up order does not imply that WAW dependences should not be checked.
5922 void InterleavedAccessInfo::analyzeInterleaving(
5923     const ValueToValueMap &Strides) {
5924   DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
5925 
5926   // Holds all accesses with a constant stride.
5927   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
5928   collectConstStrideAccesses(AccessStrideInfo, Strides);
5929 
5930   if (AccessStrideInfo.empty())
5931     return;
5932 
5933   // Collect the dependences in the loop.
5934   collectDependences();
5935 
5936   // Holds all interleaved store groups temporarily.
5937   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
5938   // Holds all interleaved load groups temporarily.
5939   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
5940 
5941   // Search in bottom-up program order for pairs of accesses (A and B) that can
5942   // form interleaved load or store groups. In the algorithm below, access A
5943   // precedes access B in program order. We initialize a group for B in the
5944   // outer loop of the algorithm, and then in the inner loop, we attempt to
5945   // insert each A into B's group if:
5946   //
5947   //  1. A and B have the same stride,
5948   //  2. A and B have the same memory object size, and
5949   //  3. A belongs in B's group according to its distance from B.
5950   //
5951   // Special care is taken to ensure group formation will not break any
5952   // dependences.
5953   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
5954        BI != E; ++BI) {
5955     Instruction *B = BI->first;
5956     StrideDescriptor DesB = BI->second;
5957 
5958     // Initialize a group for B if it has an allowable stride. Even if we don't
5959     // create a group for B, we continue with the bottom-up algorithm to ensure
5960     // we don't break any of B's dependences.
5961     InterleaveGroup *Group = nullptr;
5962     if (isStrided(DesB.Stride)) {
5963       Group = getInterleaveGroup(B);
5964       if (!Group) {
5965         DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
5966         Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
5967       }
5968       if (B->mayWriteToMemory())
5969         StoreGroups.insert(Group);
5970       else
5971         LoadGroups.insert(Group);
5972     }
5973 
5974     for (auto AI = std::next(BI); AI != E; ++AI) {
5975       Instruction *A = AI->first;
5976       StrideDescriptor DesA = AI->second;
5977 
5978       // Our code motion strategy implies that we can't have dependences
5979       // between accesses in an interleaved group and other accesses located
5980       // between the first and last member of the group. Note that this also
5981       // means that a group can't have more than one member at a given offset.
5982       // The accesses in a group can have dependences with other accesses, but
5983       // we must ensure we don't extend the boundaries of the group such that
5984       // we encompass those dependent accesses.
5985       //
5986       // For example, assume we have the sequence of accesses shown below in a
5987       // stride-2 loop:
5988       //
5989       //  (1, 2) is a group | A[i]   = a;  // (1)
5990       //                    | A[i-1] = b;  // (2) |
5991       //                      A[i-3] = c;  // (3)
5992       //                      A[i]   = d;  // (4) | (2, 4) is not a group
5993       //
5994       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
5995       // but not with (4). If we did, the dependent access (3) would be within
5996       // the boundaries of the (2, 4) group.
5997       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
5998 
5999         // If a dependence exists and A is already in a group, we know that A
6000         // must be a store since A precedes B and WAR dependences are allowed.
6001         // Thus, A would be sunk below B. We release A's group to prevent this
6002         // illegal code motion. A will then be free to form another group with
6003         // instructions that precede it.
6004         if (isInterleaved(A)) {
6005           InterleaveGroup *StoreGroup = getInterleaveGroup(A);
6006           StoreGroups.remove(StoreGroup);
6007           releaseGroup(StoreGroup);
6008         }
6009 
6010         // If a dependence exists and A is not already in a group (or it was
6011         // and we just released it), B might be hoisted above A (if B is a
6012         // load) or another store might be sunk below A (if B is a store). In
6013         // either case, we can't add additional instructions to B's group. B
6014         // will only form a group with instructions that it precedes.
6015         break;
6016       }
6017 
6018       // At this point, we've checked for illegal code motion. If either A or B
6019       // isn't strided, there's nothing left to do.
6020       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
6021         continue;
6022 
6023       // Ignore A if it's already in a group or isn't the same kind of memory
6024       // operation as B.
6025       if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
6026         continue;
6027 
6028       // Check rules 1 and 2. Ignore A if its stride or size is different from
6029       // that of B.
6030       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
6031         continue;
6032 
6033       // Ignore A if the memory object of A and B don't belong to the same
6034       // address space
6035       if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
6036         continue;
6037 
6038       // Calculate the distance from A to B.
6039       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
6040           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
6041       if (!DistToB)
6042         continue;
6043       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
6044 
6045       // Check rule 3. Ignore A if its distance to B is not a multiple of the
6046       // size.
6047       if (DistanceToB % static_cast<int64_t>(DesB.Size))
6048         continue;
6049 
6050       // Ignore A if either A or B is in a predicated block. Although we
6051       // currently prevent group formation for predicated accesses, we may be
6052       // able to relax this limitation in the future once we handle more
6053       // complicated blocks.
6054       if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
6055         continue;
6056 
6057       // The index of A is the index of B plus A's distance to B in multiples
6058       // of the size.
6059       int IndexA =
6060           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
6061 
6062       // Try to insert A into B's group.
6063       if (Group->insertMember(A, IndexA, DesA.Align)) {
6064         DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
6065                      << "    into the interleave group with" << *B << '\n');
6066         InterleaveGroupMap[A] = Group;
6067 
6068         // Set the first load in program order as the insert position.
6069         if (A->mayReadFromMemory())
6070           Group->setInsertPos(A);
6071       }
6072     } // Iteration over A accesses.
6073   } // Iteration over B accesses.
6074 
6075   // Remove interleaved store groups with gaps.
6076   for (InterleaveGroup *Group : StoreGroups)
6077     if (Group->getNumMembers() != Group->getFactor()) {
6078       DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
6079                       "to gaps.\n");
6080       releaseGroup(Group);
6081     }
6082   // Remove interleaved groups with gaps (currently only loads) whose memory
6083   // accesses may wrap around. We have to revisit the getPtrStride analysis,
6084   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
6085   // not check wrapping (see documentation there).
6086   // FORNOW we use Assume=false;
6087   // TODO: Change to Assume=true but making sure we don't exceed the threshold
6088   // of runtime SCEV assumptions checks (thereby potentially failing to
6089   // vectorize altogether).
6090   // Additional optional optimizations:
6091   // TODO: If we are peeling the loop and we know that the first pointer doesn't
6092   // wrap then we can deduce that all pointers in the group don't wrap.
6093   // This means that we can forcefully peel the loop in order to only have to
6094   // check the first pointer for no-wrap. When we'll change to use Assume=true
6095   // we'll only need at most one runtime check per interleaved group.
6096   //
6097   for (InterleaveGroup *Group : LoadGroups) {
6098 
6099     // Case 1: A full group. Can Skip the checks; For full groups, if the wide
6100     // load would wrap around the address space we would do a memory access at
6101     // nullptr even without the transformation.
6102     if (Group->getNumMembers() == Group->getFactor())
6103       continue;
6104 
6105     // Case 2: If first and last members of the group don't wrap this implies
6106     // that all the pointers in the group don't wrap.
6107     // So we check only group member 0 (which is always guaranteed to exist),
6108     // and group member Factor - 1; If the latter doesn't exist we rely on
6109     // peeling (if it is a non-reveresed accsess -- see Case 3).
6110     Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
6111     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
6112                       /*ShouldCheckWrap=*/true)) {
6113       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6114                       "first group member potentially pointer-wrapping.\n");
6115       releaseGroup(Group);
6116       continue;
6117     }
6118     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
6119     if (LastMember) {
6120       Value *LastMemberPtr = getPointerOperand(LastMember);
6121       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
6122                         /*ShouldCheckWrap=*/true)) {
6123         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6124                         "last group member potentially pointer-wrapping.\n");
6125         releaseGroup(Group);
6126       }
6127     } else {
6128       // Case 3: A non-reversed interleaved load group with gaps: We need
6129       // to execute at least one scalar epilogue iteration. This will ensure
6130       // we don't speculatively access memory out-of-bounds. We only need
6131       // to look for a member at index factor - 1, since every group must have
6132       // a member at index zero.
6133       if (Group->isReverse()) {
6134         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6135                         "a reverse access with gaps.\n");
6136         releaseGroup(Group);
6137         continue;
6138       }
6139       DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
6140       RequiresScalarEpilogue = true;
6141     }
6142   }
6143 }
6144 
6145 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
6146   if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
6147     ORE->emit(createMissedAnalysis("ConditionalStore")
6148               << "store that is conditionally executed prevents vectorization");
6149     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
6150     return None;
6151   }
6152 
6153   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
6154     // TODO: It may by useful to do since it's still likely to be dynamically
6155     // uniform if the target can skip.
6156     DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target");
6157 
6158     ORE->emit(
6159       createMissedAnalysis("CantVersionLoopWithDivergentTarget")
6160       << "runtime pointer checks needed. Not enabled for divergent target");
6161 
6162     return None;
6163   }
6164 
6165   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6166   if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
6167     return computeFeasibleMaxVF(OptForSize, TC);
6168 
6169   if (Legal->getRuntimePointerChecking()->Need) {
6170     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
6171               << "runtime pointer checks needed. Enable vectorization of this "
6172                  "loop with '#pragma clang loop vectorize(enable)' when "
6173                  "compiling with -Os/-Oz");
6174     DEBUG(dbgs()
6175           << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
6176     return None;
6177   }
6178 
6179   // If we optimize the program for size, avoid creating the tail loop.
6180   DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
6181 
6182   // If we don't know the precise trip count, don't try to vectorize.
6183   if (TC < 2) {
6184     ORE->emit(
6185         createMissedAnalysis("UnknownLoopCountComplexCFG")
6186         << "unable to calculate the loop count due to complex control flow");
6187     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6188     return None;
6189   }
6190 
6191   unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);
6192 
6193   if (TC % MaxVF != 0) {
6194     // If the trip count that we found modulo the vectorization factor is not
6195     // zero then we require a tail.
6196     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
6197     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
6198     //        smaller MaxVF that does not require a scalar epilog.
6199 
6200     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
6201               << "cannot optimize for size and vectorize at the "
6202                  "same time. Enable vectorization of this loop "
6203                  "with '#pragma clang loop vectorize(enable)' "
6204                  "when compiling with -Os/-Oz");
6205     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6206     return None;
6207   }
6208 
6209   return MaxVF;
6210 }
6211 
6212 unsigned
6213 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
6214                                                  unsigned ConstTripCount) {
6215   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
6216   unsigned SmallestType, WidestType;
6217   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
6218   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
6219 
6220   // Get the maximum safe dependence distance in bits computed by LAA.
6221   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
6222   // the memory accesses that is most restrictive (involved in the smallest
6223   // dependence distance).
6224   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
6225 
6226   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
6227 
6228   unsigned MaxVectorSize = WidestRegister / WidestType;
6229 
6230   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
6231                << WidestType << " bits.\n");
6232   DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
6233                << " bits.\n");
6234 
6235   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
6236                                 " into one vector!");
6237   if (MaxVectorSize == 0) {
6238     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
6239     MaxVectorSize = 1;
6240     return MaxVectorSize;
6241   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
6242              isPowerOf2_32(ConstTripCount)) {
6243     // We need to clamp the VF to be the ConstTripCount. There is no point in
6244     // choosing a higher viable VF as done in the loop below.
6245     DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
6246                  << ConstTripCount << "\n");
6247     MaxVectorSize = ConstTripCount;
6248     return MaxVectorSize;
6249   }
6250 
6251   unsigned MaxVF = MaxVectorSize;
6252   if (MaximizeBandwidth && !OptForSize) {
6253     // Collect all viable vectorization factors larger than the default MaxVF
6254     // (i.e. MaxVectorSize).
6255     SmallVector<unsigned, 8> VFs;
6256     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
6257     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
6258       VFs.push_back(VS);
6259 
6260     // For each VF calculate its register usage.
6261     auto RUs = calculateRegisterUsage(VFs);
6262 
6263     // Select the largest VF which doesn't require more registers than existing
6264     // ones.
6265     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
6266     for (int i = RUs.size() - 1; i >= 0; --i) {
6267       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
6268         MaxVF = VFs[i];
6269         break;
6270       }
6271     }
6272   }
6273   return MaxVF;
6274 }
6275 
6276 LoopVectorizationCostModel::VectorizationFactor
6277 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
6278   float Cost = expectedCost(1).first;
6279 #ifndef NDEBUG
6280   const float ScalarCost = Cost;
6281 #endif /* NDEBUG */
6282   unsigned Width = 1;
6283   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
6284 
6285   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6286   // Ignore scalar width, because the user explicitly wants vectorization.
6287   if (ForceVectorization && MaxVF > 1) {
6288     Width = 2;
6289     Cost = expectedCost(Width).first / (float)Width;
6290   }
6291 
6292   for (unsigned i = 2; i <= MaxVF; i *= 2) {
6293     // Notice that the vector loop needs to be executed less times, so
6294     // we need to divide the cost of the vector loops by the width of
6295     // the vector elements.
6296     VectorizationCostTy C = expectedCost(i);
6297     float VectorCost = C.first / (float)i;
6298     DEBUG(dbgs() << "LV: Vector loop of width " << i
6299                  << " costs: " << (int)VectorCost << ".\n");
6300     if (!C.second && !ForceVectorization) {
6301       DEBUG(
6302           dbgs() << "LV: Not considering vector loop of width " << i
6303                  << " because it will not generate any vector instructions.\n");
6304       continue;
6305     }
6306     if (VectorCost < Cost) {
6307       Cost = VectorCost;
6308       Width = i;
6309     }
6310   }
6311 
6312   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
6313         << "LV: Vectorization seems to be not beneficial, "
6314         << "but was forced by a user.\n");
6315   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
6316   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
6317   return Factor;
6318 }
6319 
6320 std::pair<unsigned, unsigned>
6321 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6322   unsigned MinWidth = -1U;
6323   unsigned MaxWidth = 8;
6324   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6325 
6326   // For each block.
6327   for (BasicBlock *BB : TheLoop->blocks()) {
6328     // For each instruction in the loop.
6329     for (Instruction &I : *BB) {
6330       Type *T = I.getType();
6331 
6332       // Skip ignored values.
6333       if (ValuesToIgnore.count(&I))
6334         continue;
6335 
6336       // Only examine Loads, Stores and PHINodes.
6337       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6338         continue;
6339 
6340       // Examine PHI nodes that are reduction variables. Update the type to
6341       // account for the recurrence type.
6342       if (auto *PN = dyn_cast<PHINode>(&I)) {
6343         if (!Legal->isReductionVariable(PN))
6344           continue;
6345         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
6346         T = RdxDesc.getRecurrenceType();
6347       }
6348 
6349       // Examine the stored values.
6350       if (auto *ST = dyn_cast<StoreInst>(&I))
6351         T = ST->getValueOperand()->getType();
6352 
6353       // Ignore loaded pointer types and stored pointer types that are not
6354       // vectorizable.
6355       //
6356       // FIXME: The check here attempts to predict whether a load or store will
6357       //        be vectorized. We only know this for certain after a VF has
6358       //        been selected. Here, we assume that if an access can be
6359       //        vectorized, it will be. We should also look at extending this
6360       //        optimization to non-pointer types.
6361       //
6362       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6363           !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
6364         continue;
6365 
6366       MinWidth = std::min(MinWidth,
6367                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6368       MaxWidth = std::max(MaxWidth,
6369                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6370     }
6371   }
6372 
6373   return {MinWidth, MaxWidth};
6374 }
6375 
6376 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
6377                                                            unsigned VF,
6378                                                            unsigned LoopCost) {
6379 
6380   // -- The interleave heuristics --
6381   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6382   // There are many micro-architectural considerations that we can't predict
6383   // at this level. For example, frontend pressure (on decode or fetch) due to
6384   // code size, or the number and capabilities of the execution ports.
6385   //
6386   // We use the following heuristics to select the interleave count:
6387   // 1. If the code has reductions, then we interleave to break the cross
6388   // iteration dependency.
6389   // 2. If the loop is really small, then we interleave to reduce the loop
6390   // overhead.
6391   // 3. We don't interleave if we think that we will spill registers to memory
6392   // due to the increased register pressure.
6393 
6394   // When we optimize for size, we don't interleave.
6395   if (OptForSize)
6396     return 1;
6397 
6398   // We used the distance for the interleave count.
6399   if (Legal->getMaxSafeDepDistBytes() != -1U)
6400     return 1;
6401 
6402   // Do not interleave loops with a relatively small trip count.
6403   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6404   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
6405     return 1;
6406 
6407   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
6408   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6409                << " registers\n");
6410 
6411   if (VF == 1) {
6412     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6413       TargetNumRegisters = ForceTargetNumScalarRegs;
6414   } else {
6415     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6416       TargetNumRegisters = ForceTargetNumVectorRegs;
6417   }
6418 
6419   RegisterUsage R = calculateRegisterUsage({VF})[0];
6420   // We divide by these constants so assume that we have at least one
6421   // instruction that uses at least one register.
6422   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
6423   R.NumInstructions = std::max(R.NumInstructions, 1U);
6424 
6425   // We calculate the interleave count using the following formula.
6426   // Subtract the number of loop invariants from the number of available
6427   // registers. These registers are used by all of the interleaved instances.
6428   // Next, divide the remaining registers by the number of registers that is
6429   // required by the loop, in order to estimate how many parallel instances
6430   // fit without causing spills. All of this is rounded down if necessary to be
6431   // a power of two. We want power of two interleave count to simplify any
6432   // addressing operations or alignment considerations.
6433   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
6434                               R.MaxLocalUsers);
6435 
6436   // Don't count the induction variable as interleaved.
6437   if (EnableIndVarRegisterHeur)
6438     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
6439                        std::max(1U, (R.MaxLocalUsers - 1)));
6440 
6441   // Clamp the interleave ranges to reasonable counts.
6442   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
6443 
6444   // Check if the user has overridden the max.
6445   if (VF == 1) {
6446     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6447       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6448   } else {
6449     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6450       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6451   }
6452 
6453   // If we did not calculate the cost for VF (because the user selected the VF)
6454   // then we calculate the cost of VF here.
6455   if (LoopCost == 0)
6456     LoopCost = expectedCost(VF).first;
6457 
6458   // Clamp the calculated IC to be between the 1 and the max interleave count
6459   // that the target allows.
6460   if (IC > MaxInterleaveCount)
6461     IC = MaxInterleaveCount;
6462   else if (IC < 1)
6463     IC = 1;
6464 
6465   // Interleave if we vectorized this loop and there is a reduction that could
6466   // benefit from interleaving.
6467   if (VF > 1 && Legal->getReductionVars()->size()) {
6468     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6469     return IC;
6470   }
6471 
6472   // Note that if we've already vectorized the loop we will have done the
6473   // runtime check and so interleaving won't require further checks.
6474   bool InterleavingRequiresRuntimePointerCheck =
6475       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
6476 
6477   // We want to interleave small loops in order to reduce the loop overhead and
6478   // potentially expose ILP opportunities.
6479   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6480   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6481     // We assume that the cost overhead is 1 and we use the cost model
6482     // to estimate the cost of the loop and interleave until the cost of the
6483     // loop overhead is about 5% of the cost of the loop.
6484     unsigned SmallIC =
6485         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6486 
6487     // Interleave until store/load ports (estimated by max interleave count) are
6488     // saturated.
6489     unsigned NumStores = Legal->getNumStores();
6490     unsigned NumLoads = Legal->getNumLoads();
6491     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6492     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6493 
6494     // If we have a scalar reduction (vector reductions are already dealt with
6495     // by this point), we can increase the critical path length if the loop
6496     // we're interleaving is inside another loop. Limit, by default to 2, so the
6497     // critical path only gets increased by one reduction operation.
6498     if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
6499       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6500       SmallIC = std::min(SmallIC, F);
6501       StoresIC = std::min(StoresIC, F);
6502       LoadsIC = std::min(LoadsIC, F);
6503     }
6504 
6505     if (EnableLoadStoreRuntimeInterleave &&
6506         std::max(StoresIC, LoadsIC) > SmallIC) {
6507       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6508       return std::max(StoresIC, LoadsIC);
6509     }
6510 
6511     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6512     return SmallIC;
6513   }
6514 
6515   // Interleave if this is a large loop (small loops are already dealt with by
6516   // this point) that could benefit from interleaving.
6517   bool HasReductions = (Legal->getReductionVars()->size() > 0);
6518   if (TTI.enableAggressiveInterleaving(HasReductions)) {
6519     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6520     return IC;
6521   }
6522 
6523   DEBUG(dbgs() << "LV: Not Interleaving.\n");
6524   return 1;
6525 }
6526 
6527 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6528 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
6529   // This function calculates the register usage by measuring the highest number
6530   // of values that are alive at a single location. Obviously, this is a very
6531   // rough estimation. We scan the loop in a topological order in order and
6532   // assign a number to each instruction. We use RPO to ensure that defs are
6533   // met before their users. We assume that each instruction that has in-loop
6534   // users starts an interval. We record every time that an in-loop value is
6535   // used, so we have a list of the first and last occurrences of each
6536   // instruction. Next, we transpose this data structure into a multi map that
6537   // holds the list of intervals that *end* at a specific location. This multi
6538   // map allows us to perform a linear search. We scan the instructions linearly
6539   // and record each time that a new interval starts, by placing it in a set.
6540   // If we find this value in the multi-map then we remove it from the set.
6541   // The max register usage is the maximum size of the set.
6542   // We also search for instructions that are defined outside the loop, but are
6543   // used inside the loop. We need this number separately from the max-interval
6544   // usage number because when we unroll, loop-invariant values do not take
6545   // more register.
6546   LoopBlocksDFS DFS(TheLoop);
6547   DFS.perform(LI);
6548 
6549   RegisterUsage RU;
6550   RU.NumInstructions = 0;
6551 
6552   // Each 'key' in the map opens a new interval. The values
6553   // of the map are the index of the 'last seen' usage of the
6554   // instruction that is the key.
6555   typedef DenseMap<Instruction *, unsigned> IntervalMap;
6556   // Maps instruction to its index.
6557   DenseMap<unsigned, Instruction *> IdxToInstr;
6558   // Marks the end of each interval.
6559   IntervalMap EndPoint;
6560   // Saves the list of instruction indices that are used in the loop.
6561   SmallSet<Instruction *, 8> Ends;
6562   // Saves the list of values that are used in the loop but are
6563   // defined outside the loop, such as arguments and constants.
6564   SmallPtrSet<Value *, 8> LoopInvariants;
6565 
6566   unsigned Index = 0;
6567   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6568     RU.NumInstructions += BB->size();
6569     for (Instruction &I : *BB) {
6570       IdxToInstr[Index++] = &I;
6571 
6572       // Save the end location of each USE.
6573       for (Value *U : I.operands()) {
6574         auto *Instr = dyn_cast<Instruction>(U);
6575 
6576         // Ignore non-instruction values such as arguments, constants, etc.
6577         if (!Instr)
6578           continue;
6579 
6580         // If this instruction is outside the loop then record it and continue.
6581         if (!TheLoop->contains(Instr)) {
6582           LoopInvariants.insert(Instr);
6583           continue;
6584         }
6585 
6586         // Overwrite previous end points.
6587         EndPoint[Instr] = Index;
6588         Ends.insert(Instr);
6589       }
6590     }
6591   }
6592 
6593   // Saves the list of intervals that end with the index in 'key'.
6594   typedef SmallVector<Instruction *, 2> InstrList;
6595   DenseMap<unsigned, InstrList> TransposeEnds;
6596 
6597   // Transpose the EndPoints to a list of values that end at each index.
6598   for (auto &Interval : EndPoint)
6599     TransposeEnds[Interval.second].push_back(Interval.first);
6600 
6601   SmallSet<Instruction *, 8> OpenIntervals;
6602 
6603   // Get the size of the widest register.
6604   unsigned MaxSafeDepDist = -1U;
6605   if (Legal->getMaxSafeDepDistBytes() != -1U)
6606     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
6607   unsigned WidestRegister =
6608       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
6609   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6610 
6611   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6612   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
6613 
6614   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6615 
6616   // A lambda that gets the register usage for the given type and VF.
6617   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
6618     if (Ty->isTokenTy())
6619       return 0U;
6620     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
6621     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
6622   };
6623 
6624   for (unsigned int i = 0; i < Index; ++i) {
6625     Instruction *I = IdxToInstr[i];
6626 
6627     // Remove all of the instructions that end at this location.
6628     InstrList &List = TransposeEnds[i];
6629     for (Instruction *ToRemove : List)
6630       OpenIntervals.erase(ToRemove);
6631 
6632     // Ignore instructions that are never used within the loop.
6633     if (!Ends.count(I))
6634       continue;
6635 
6636     // Skip ignored values.
6637     if (ValuesToIgnore.count(I))
6638       continue;
6639 
6640     // For each VF find the maximum usage of registers.
6641     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6642       if (VFs[j] == 1) {
6643         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
6644         continue;
6645       }
6646       collectUniformsAndScalars(VFs[j]);
6647       // Count the number of live intervals.
6648       unsigned RegUsage = 0;
6649       for (auto Inst : OpenIntervals) {
6650         // Skip ignored values for VF > 1.
6651         if (VecValuesToIgnore.count(Inst) ||
6652             isScalarAfterVectorization(Inst, VFs[j]))
6653           continue;
6654         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
6655       }
6656       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
6657     }
6658 
6659     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6660                  << OpenIntervals.size() << '\n');
6661 
6662     // Add the current instruction to the list of open intervals.
6663     OpenIntervals.insert(I);
6664   }
6665 
6666   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6667     unsigned Invariant = 0;
6668     if (VFs[i] == 1)
6669       Invariant = LoopInvariants.size();
6670     else {
6671       for (auto Inst : LoopInvariants)
6672         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
6673     }
6674 
6675     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
6676     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
6677     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
6678     DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
6679 
6680     RU.LoopInvariantRegs = Invariant;
6681     RU.MaxLocalUsers = MaxUsages[i];
6682     RUs[i] = RU;
6683   }
6684 
6685   return RUs;
6686 }
6687 
6688 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
6689 
6690   // If we aren't vectorizing the loop, or if we've already collected the
6691   // instructions to scalarize, there's nothing to do. Collection may already
6692   // have occurred if we have a user-selected VF and are now computing the
6693   // expected cost for interleaving.
6694   if (VF < 2 || InstsToScalarize.count(VF))
6695     return;
6696 
6697   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
6698   // not profitable to scalarize any instructions, the presence of VF in the
6699   // map will indicate that we've analyzed it already.
6700   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6701 
6702   // Find all the instructions that are scalar with predication in the loop and
6703   // determine if it would be better to not if-convert the blocks they are in.
6704   // If so, we also record the instructions to scalarize.
6705   for (BasicBlock *BB : TheLoop->blocks()) {
6706     if (!Legal->blockNeedsPredication(BB))
6707       continue;
6708     for (Instruction &I : *BB)
6709       if (Legal->isScalarWithPredication(&I)) {
6710         ScalarCostsTy ScalarCosts;
6711         if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6712           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6713 
6714         // Remember that BB will remain after vectorization.
6715         PredicatedBBsAfterVectorization.insert(BB);
6716       }
6717   }
6718 }
6719 
6720 int LoopVectorizationCostModel::computePredInstDiscount(
6721     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
6722     unsigned VF) {
6723 
6724   assert(!isUniformAfterVectorization(PredInst, VF) &&
6725          "Instruction marked uniform-after-vectorization will be predicated");
6726 
6727   // Initialize the discount to zero, meaning that the scalar version and the
6728   // vector version cost the same.
6729   int Discount = 0;
6730 
6731   // Holds instructions to analyze. The instructions we visit are mapped in
6732   // ScalarCosts. Those instructions are the ones that would be scalarized if
6733   // we find that the scalar version costs less.
6734   SmallVector<Instruction *, 8> Worklist;
6735 
6736   // Returns true if the given instruction can be scalarized.
6737   auto canBeScalarized = [&](Instruction *I) -> bool {
6738 
6739     // We only attempt to scalarize instructions forming a single-use chain
6740     // from the original predicated block that would otherwise be vectorized.
6741     // Although not strictly necessary, we give up on instructions we know will
6742     // already be scalar to avoid traversing chains that are unlikely to be
6743     // beneficial.
6744     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6745         isScalarAfterVectorization(I, VF))
6746       return false;
6747 
6748     // If the instruction is scalar with predication, it will be analyzed
6749     // separately. We ignore it within the context of PredInst.
6750     if (Legal->isScalarWithPredication(I))
6751       return false;
6752 
6753     // If any of the instruction's operands are uniform after vectorization,
6754     // the instruction cannot be scalarized. This prevents, for example, a
6755     // masked load from being scalarized.
6756     //
6757     // We assume we will only emit a value for lane zero of an instruction
6758     // marked uniform after vectorization, rather than VF identical values.
6759     // Thus, if we scalarize an instruction that uses a uniform, we would
6760     // create uses of values corresponding to the lanes we aren't emitting code
6761     // for. This behavior can be changed by allowing getScalarValue to clone
6762     // the lane zero values for uniforms rather than asserting.
6763     for (Use &U : I->operands())
6764       if (auto *J = dyn_cast<Instruction>(U.get()))
6765         if (isUniformAfterVectorization(J, VF))
6766           return false;
6767 
6768     // Otherwise, we can scalarize the instruction.
6769     return true;
6770   };
6771 
6772   // Returns true if an operand that cannot be scalarized must be extracted
6773   // from a vector. We will account for this scalarization overhead below. Note
6774   // that the non-void predicated instructions are placed in their own blocks,
6775   // and their return values are inserted into vectors. Thus, an extract would
6776   // still be required.
6777   auto needsExtract = [&](Instruction *I) -> bool {
6778     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
6779   };
6780 
6781   // Compute the expected cost discount from scalarizing the entire expression
6782   // feeding the predicated instruction. We currently only consider expressions
6783   // that are single-use instruction chains.
6784   Worklist.push_back(PredInst);
6785   while (!Worklist.empty()) {
6786     Instruction *I = Worklist.pop_back_val();
6787 
6788     // If we've already analyzed the instruction, there's nothing to do.
6789     if (ScalarCosts.count(I))
6790       continue;
6791 
6792     // Compute the cost of the vector instruction. Note that this cost already
6793     // includes the scalarization overhead of the predicated instruction.
6794     unsigned VectorCost = getInstructionCost(I, VF).first;
6795 
6796     // Compute the cost of the scalarized instruction. This cost is the cost of
6797     // the instruction as if it wasn't if-converted and instead remained in the
6798     // predicated block. We will scale this cost by block probability after
6799     // computing the scalarization overhead.
6800     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
6801 
6802     // Compute the scalarization overhead of needed insertelement instructions
6803     // and phi nodes.
6804     if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6805       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
6806                                                  true, false);
6807       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
6808     }
6809 
6810     // Compute the scalarization overhead of needed extractelement
6811     // instructions. For each of the instruction's operands, if the operand can
6812     // be scalarized, add it to the worklist; otherwise, account for the
6813     // overhead.
6814     for (Use &U : I->operands())
6815       if (auto *J = dyn_cast<Instruction>(U.get())) {
6816         assert(VectorType::isValidElementType(J->getType()) &&
6817                "Instruction has non-scalar type");
6818         if (canBeScalarized(J))
6819           Worklist.push_back(J);
6820         else if (needsExtract(J))
6821           ScalarCost += TTI.getScalarizationOverhead(
6822                               ToVectorTy(J->getType(),VF), false, true);
6823       }
6824 
6825     // Scale the total scalar cost by block probability.
6826     ScalarCost /= getReciprocalPredBlockProb();
6827 
6828     // Compute the discount. A non-negative discount means the vector version
6829     // of the instruction costs more, and scalarizing would be beneficial.
6830     Discount += VectorCost - ScalarCost;
6831     ScalarCosts[I] = ScalarCost;
6832   }
6833 
6834   return Discount;
6835 }
6836 
6837 LoopVectorizationCostModel::VectorizationCostTy
6838 LoopVectorizationCostModel::expectedCost(unsigned VF) {
6839   VectorizationCostTy Cost;
6840 
6841   // For each block.
6842   for (BasicBlock *BB : TheLoop->blocks()) {
6843     VectorizationCostTy BlockCost;
6844 
6845     // For each instruction in the old loop.
6846     for (Instruction &I : *BB) {
6847       // Skip dbg intrinsics.
6848       if (isa<DbgInfoIntrinsic>(I))
6849         continue;
6850 
6851       // Skip ignored values.
6852       if (ValuesToIgnore.count(&I))
6853         continue;
6854 
6855       VectorizationCostTy C = getInstructionCost(&I, VF);
6856 
6857       // Check if we should override the cost.
6858       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6859         C.first = ForceTargetInstructionCost;
6860 
6861       BlockCost.first += C.first;
6862       BlockCost.second |= C.second;
6863       DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
6864                    << VF << " For instruction: " << I << '\n');
6865     }
6866 
6867     // If we are vectorizing a predicated block, it will have been
6868     // if-converted. This means that the block's instructions (aside from
6869     // stores and instructions that may divide by zero) will now be
6870     // unconditionally executed. For the scalar case, we may not always execute
6871     // the predicated block. Thus, scale the block's cost by the probability of
6872     // executing it.
6873     if (VF == 1 && Legal->blockNeedsPredication(BB))
6874       BlockCost.first /= getReciprocalPredBlockProb();
6875 
6876     Cost.first += BlockCost.first;
6877     Cost.second |= BlockCost.second;
6878   }
6879 
6880   return Cost;
6881 }
6882 
6883 /// \brief Gets Address Access SCEV after verifying that the access pattern
6884 /// is loop invariant except the induction variable dependence.
6885 ///
6886 /// This SCEV can be sent to the Target in order to estimate the address
6887 /// calculation cost.
6888 static const SCEV *getAddressAccessSCEV(
6889               Value *Ptr,
6890               LoopVectorizationLegality *Legal,
6891               ScalarEvolution *SE,
6892               const Loop *TheLoop) {
6893   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6894   if (!Gep)
6895     return nullptr;
6896 
6897   // We are looking for a gep with all loop invariant indices except for one
6898   // which should be an induction variable.
6899   unsigned NumOperands = Gep->getNumOperands();
6900   for (unsigned i = 1; i < NumOperands; ++i) {
6901     Value *Opd = Gep->getOperand(i);
6902     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6903         !Legal->isInductionVariable(Opd))
6904       return nullptr;
6905   }
6906 
6907   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6908   return SE->getSCEV(Ptr);
6909 }
6910 
6911 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6912   return Legal->hasStride(I->getOperand(0)) ||
6913          Legal->hasStride(I->getOperand(1));
6914 }
6915 
6916 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6917                                                                  unsigned VF) {
6918   Type *ValTy = getMemInstValueType(I);
6919   auto SE = PSE.getSE();
6920 
6921   unsigned Alignment = getMemInstAlignment(I);
6922   unsigned AS = getMemInstAddressSpace(I);
6923   Value *Ptr = getPointerOperand(I);
6924   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6925 
6926   // Figure out whether the access is strided and get the stride value
6927   // if it's known in compile time
6928   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop);
6929 
6930   // Get the cost of the scalar memory instruction and address computation.
6931   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6932 
6933   Cost += VF *
6934           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6935                               AS, I);
6936 
6937   // Get the overhead of the extractelement and insertelement instructions
6938   // we might create due to scalarization.
6939   Cost += getScalarizationOverhead(I, VF, TTI);
6940 
6941   // If we have a predicated store, it may not be executed for each vector
6942   // lane. Scale the cost by the probability of executing the predicated
6943   // block.
6944   if (Legal->isScalarWithPredication(I))
6945     Cost /= getReciprocalPredBlockProb();
6946 
6947   return Cost;
6948 }
6949 
6950 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6951                                                              unsigned VF) {
6952   Type *ValTy = getMemInstValueType(I);
6953   Type *VectorTy = ToVectorTy(ValTy, VF);
6954   unsigned Alignment = getMemInstAlignment(I);
6955   Value *Ptr = getPointerOperand(I);
6956   unsigned AS = getMemInstAddressSpace(I);
6957   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6958 
6959   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6960          "Stride should be 1 or -1 for consecutive memory access");
6961   unsigned Cost = 0;
6962   if (Legal->isMaskRequired(I))
6963     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
6964   else
6965     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
6966 
6967   bool Reverse = ConsecutiveStride < 0;
6968   if (Reverse)
6969     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6970   return Cost;
6971 }
6972 
6973 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6974                                                          unsigned VF) {
6975   LoadInst *LI = cast<LoadInst>(I);
6976   Type *ValTy = LI->getType();
6977   Type *VectorTy = ToVectorTy(ValTy, VF);
6978   unsigned Alignment = LI->getAlignment();
6979   unsigned AS = LI->getPointerAddressSpace();
6980 
6981   return TTI.getAddressComputationCost(ValTy) +
6982          TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
6983          TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6984 }
6985 
6986 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6987                                                           unsigned VF) {
6988   Type *ValTy = getMemInstValueType(I);
6989   Type *VectorTy = ToVectorTy(ValTy, VF);
6990   unsigned Alignment = getMemInstAlignment(I);
6991   Value *Ptr = getPointerOperand(I);
6992 
6993   return TTI.getAddressComputationCost(VectorTy) +
6994          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
6995                                     Legal->isMaskRequired(I), Alignment);
6996 }
6997 
6998 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6999                                                             unsigned VF) {
7000   Type *ValTy = getMemInstValueType(I);
7001   Type *VectorTy = ToVectorTy(ValTy, VF);
7002   unsigned AS = getMemInstAddressSpace(I);
7003 
7004   auto Group = Legal->getInterleavedAccessGroup(I);
7005   assert(Group && "Fail to get an interleaved access group.");
7006 
7007   unsigned InterleaveFactor = Group->getFactor();
7008   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7009 
7010   // Holds the indices of existing members in an interleaved load group.
7011   // An interleaved store group doesn't need this as it doesn't allow gaps.
7012   SmallVector<unsigned, 4> Indices;
7013   if (isa<LoadInst>(I)) {
7014     for (unsigned i = 0; i < InterleaveFactor; i++)
7015       if (Group->getMember(i))
7016         Indices.push_back(i);
7017   }
7018 
7019   // Calculate the cost of the whole interleaved group.
7020   unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
7021                                                  Group->getFactor(), Indices,
7022                                                  Group->getAlignment(), AS);
7023 
7024   if (Group->isReverse())
7025     Cost += Group->getNumMembers() *
7026             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
7027   return Cost;
7028 }
7029 
7030 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7031                                                               unsigned VF) {
7032 
7033   // Calculate scalar cost only. Vectorization cost should be ready at this
7034   // moment.
7035   if (VF == 1) {
7036     Type *ValTy = getMemInstValueType(I);
7037     unsigned Alignment = getMemInstAlignment(I);
7038     unsigned AS = getMemInstAddressSpace(I);
7039 
7040     return TTI.getAddressComputationCost(ValTy) +
7041            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
7042   }
7043   return getWideningCost(I, VF);
7044 }
7045 
7046 LoopVectorizationCostModel::VectorizationCostTy
7047 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
7048   // If we know that this instruction will remain uniform, check the cost of
7049   // the scalar version.
7050   if (isUniformAfterVectorization(I, VF))
7051     VF = 1;
7052 
7053   if (VF > 1 && isProfitableToScalarize(I, VF))
7054     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7055 
7056   // Forced scalars do not have any scalarization overhead.
7057   if (VF > 1 && ForcedScalars.count(VF) &&
7058       ForcedScalars.find(VF)->second.count(I))
7059     return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
7060 
7061   Type *VectorTy;
7062   unsigned C = getInstructionCost(I, VF, VectorTy);
7063 
7064   bool TypeNotScalarized =
7065       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
7066   return VectorizationCostTy(C, TypeNotScalarized);
7067 }
7068 
7069 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
7070   if (VF == 1)
7071     return;
7072   for (BasicBlock *BB : TheLoop->blocks()) {
7073     // For each instruction in the old loop.
7074     for (Instruction &I : *BB) {
7075       Value *Ptr = getPointerOperand(&I);
7076       if (!Ptr)
7077         continue;
7078 
7079       if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
7080         // Scalar load + broadcast
7081         unsigned Cost = getUniformMemOpCost(&I, VF);
7082         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7083         continue;
7084       }
7085 
7086       // We assume that widening is the best solution when possible.
7087       if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
7088         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
7089         setWideningDecision(&I, VF, CM_Widen, Cost);
7090         continue;
7091       }
7092 
7093       // Choose between Interleaving, Gather/Scatter or Scalarization.
7094       unsigned InterleaveCost = UINT_MAX;
7095       unsigned NumAccesses = 1;
7096       if (Legal->isAccessInterleaved(&I)) {
7097         auto Group = Legal->getInterleavedAccessGroup(&I);
7098         assert(Group && "Fail to get an interleaved access group.");
7099 
7100         // Make one decision for the whole group.
7101         if (getWideningDecision(&I, VF) != CM_Unknown)
7102           continue;
7103 
7104         NumAccesses = Group->getNumMembers();
7105         InterleaveCost = getInterleaveGroupCost(&I, VF);
7106       }
7107 
7108       unsigned GatherScatterCost =
7109           Legal->isLegalGatherOrScatter(&I)
7110               ? getGatherScatterCost(&I, VF) * NumAccesses
7111               : UINT_MAX;
7112 
7113       unsigned ScalarizationCost =
7114           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7115 
7116       // Choose better solution for the current VF,
7117       // write down this decision and use it during vectorization.
7118       unsigned Cost;
7119       InstWidening Decision;
7120       if (InterleaveCost <= GatherScatterCost &&
7121           InterleaveCost < ScalarizationCost) {
7122         Decision = CM_Interleave;
7123         Cost = InterleaveCost;
7124       } else if (GatherScatterCost < ScalarizationCost) {
7125         Decision = CM_GatherScatter;
7126         Cost = GatherScatterCost;
7127       } else {
7128         Decision = CM_Scalarize;
7129         Cost = ScalarizationCost;
7130       }
7131       // If the instructions belongs to an interleave group, the whole group
7132       // receives the same decision. The whole group receives the cost, but
7133       // the cost will actually be assigned to one instruction.
7134       if (auto Group = Legal->getInterleavedAccessGroup(&I))
7135         setWideningDecision(Group, VF, Decision, Cost);
7136       else
7137         setWideningDecision(&I, VF, Decision, Cost);
7138     }
7139   }
7140 
7141   // Make sure that any load of address and any other address computation
7142   // remains scalar unless there is gather/scatter support. This avoids
7143   // inevitable extracts into address registers, and also has the benefit of
7144   // activating LSR more, since that pass can't optimize vectorized
7145   // addresses.
7146   if (TTI.prefersVectorizedAddressing())
7147     return;
7148 
7149   // Start with all scalar pointer uses.
7150   SmallPtrSet<Instruction *, 8> AddrDefs;
7151   for (BasicBlock *BB : TheLoop->blocks())
7152     for (Instruction &I : *BB) {
7153       Instruction *PtrDef =
7154         dyn_cast_or_null<Instruction>(getPointerOperand(&I));
7155       if (PtrDef && TheLoop->contains(PtrDef) &&
7156           getWideningDecision(&I, VF) != CM_GatherScatter)
7157         AddrDefs.insert(PtrDef);
7158     }
7159 
7160   // Add all instructions used to generate the addresses.
7161   SmallVector<Instruction *, 4> Worklist;
7162   for (auto *I : AddrDefs)
7163     Worklist.push_back(I);
7164   while (!Worklist.empty()) {
7165     Instruction *I = Worklist.pop_back_val();
7166     for (auto &Op : I->operands())
7167       if (auto *InstOp = dyn_cast<Instruction>(Op))
7168         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7169             AddrDefs.insert(InstOp).second == true)
7170           Worklist.push_back(InstOp);
7171   }
7172 
7173   for (auto *I : AddrDefs) {
7174     if (isa<LoadInst>(I)) {
7175       // Setting the desired widening decision should ideally be handled in
7176       // by cost functions, but since this involves the task of finding out
7177       // if the loaded register is involved in an address computation, it is
7178       // instead changed here when we know this is the case.
7179       if (getWideningDecision(I, VF) == CM_Widen)
7180         // Scalarize a widened load of address.
7181         setWideningDecision(I, VF, CM_Scalarize,
7182                             (VF * getMemoryInstructionCost(I, 1)));
7183       else if (auto Group = Legal->getInterleavedAccessGroup(I)) {
7184         // Scalarize an interleave group of address loads.
7185         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7186           if (Instruction *Member = Group->getMember(I))
7187             setWideningDecision(Member, VF, CM_Scalarize,
7188                                 (VF * getMemoryInstructionCost(Member, 1)));
7189         }
7190       }
7191     } else
7192       // Make sure I gets scalarized and a cost estimate without
7193       // scalarization overhead.
7194       ForcedScalars[VF].insert(I);
7195   }
7196 }
7197 
7198 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7199                                                         unsigned VF,
7200                                                         Type *&VectorTy) {
7201   Type *RetTy = I->getType();
7202   if (canTruncateToMinimalBitwidth(I, VF))
7203     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7204   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7205   auto SE = PSE.getSE();
7206 
7207   // TODO: We need to estimate the cost of intrinsic calls.
7208   switch (I->getOpcode()) {
7209   case Instruction::GetElementPtr:
7210     // We mark this instruction as zero-cost because the cost of GEPs in
7211     // vectorized code depends on whether the corresponding memory instruction
7212     // is scalarized or not. Therefore, we handle GEPs with the memory
7213     // instruction cost.
7214     return 0;
7215   case Instruction::Br: {
7216     // In cases of scalarized and predicated instructions, there will be VF
7217     // predicated blocks in the vectorized loop. Each branch around these
7218     // blocks requires also an extract of its vector compare i1 element.
7219     bool ScalarPredicatedBB = false;
7220     BranchInst *BI = cast<BranchInst>(I);
7221     if (VF > 1 && BI->isConditional() &&
7222         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7223          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7224       ScalarPredicatedBB = true;
7225 
7226     if (ScalarPredicatedBB) {
7227       // Return cost for branches around scalarized and predicated blocks.
7228       Type *Vec_i1Ty =
7229           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7230       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
7231               (TTI.getCFInstrCost(Instruction::Br) * VF));
7232     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
7233       // The back-edge branch will remain, as will all scalar branches.
7234       return TTI.getCFInstrCost(Instruction::Br);
7235     else
7236       // This branch will be eliminated by if-conversion.
7237       return 0;
7238     // Note: We currently assume zero cost for an unconditional branch inside
7239     // a predicated block since it will become a fall-through, although we
7240     // may decide in the future to call TTI for all branches.
7241   }
7242   case Instruction::PHI: {
7243     auto *Phi = cast<PHINode>(I);
7244 
7245     // First-order recurrences are replaced by vector shuffles inside the loop.
7246     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
7247       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
7248                                 VectorTy, VF - 1, VectorTy);
7249 
7250     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7251     // converted into select instructions. We require N - 1 selects per phi
7252     // node, where N is the number of incoming values.
7253     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
7254       return (Phi->getNumIncomingValues() - 1) *
7255              TTI.getCmpSelInstrCost(
7256                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7257                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
7258 
7259     return TTI.getCFInstrCost(Instruction::PHI);
7260   }
7261   case Instruction::UDiv:
7262   case Instruction::SDiv:
7263   case Instruction::URem:
7264   case Instruction::SRem:
7265     // If we have a predicated instruction, it may not be executed for each
7266     // vector lane. Get the scalarization cost and scale this amount by the
7267     // probability of executing the predicated block. If the instruction is not
7268     // predicated, we fall through to the next case.
7269     if (VF > 1 && Legal->isScalarWithPredication(I)) {
7270       unsigned Cost = 0;
7271 
7272       // These instructions have a non-void type, so account for the phi nodes
7273       // that we will create. This cost is likely to be zero. The phi node
7274       // cost, if any, should be scaled by the block probability because it
7275       // models a copy at the end of each predicated block.
7276       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
7277 
7278       // The cost of the non-predicated instruction.
7279       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
7280 
7281       // The cost of insertelement and extractelement instructions needed for
7282       // scalarization.
7283       Cost += getScalarizationOverhead(I, VF, TTI);
7284 
7285       // Scale the cost by the probability of executing the predicated blocks.
7286       // This assumes the predicated block for each vector lane is equally
7287       // likely.
7288       return Cost / getReciprocalPredBlockProb();
7289     }
7290     LLVM_FALLTHROUGH;
7291   case Instruction::Add:
7292   case Instruction::FAdd:
7293   case Instruction::Sub:
7294   case Instruction::FSub:
7295   case Instruction::Mul:
7296   case Instruction::FMul:
7297   case Instruction::FDiv:
7298   case Instruction::FRem:
7299   case Instruction::Shl:
7300   case Instruction::LShr:
7301   case Instruction::AShr:
7302   case Instruction::And:
7303   case Instruction::Or:
7304   case Instruction::Xor: {
7305     // Since we will replace the stride by 1 the multiplication should go away.
7306     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7307       return 0;
7308     // Certain instructions can be cheaper to vectorize if they have a constant
7309     // second vector operand. One example of this are shifts on x86.
7310     TargetTransformInfo::OperandValueKind Op1VK =
7311         TargetTransformInfo::OK_AnyValue;
7312     TargetTransformInfo::OperandValueKind Op2VK =
7313         TargetTransformInfo::OK_AnyValue;
7314     TargetTransformInfo::OperandValueProperties Op1VP =
7315         TargetTransformInfo::OP_None;
7316     TargetTransformInfo::OperandValueProperties Op2VP =
7317         TargetTransformInfo::OP_None;
7318     Value *Op2 = I->getOperand(1);
7319 
7320     // Check for a splat or for a non uniform vector of constants.
7321     if (isa<ConstantInt>(Op2)) {
7322       ConstantInt *CInt = cast<ConstantInt>(Op2);
7323       if (CInt && CInt->getValue().isPowerOf2())
7324         Op2VP = TargetTransformInfo::OP_PowerOf2;
7325       Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7326     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
7327       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
7328       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
7329       if (SplatValue) {
7330         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
7331         if (CInt && CInt->getValue().isPowerOf2())
7332           Op2VP = TargetTransformInfo::OP_PowerOf2;
7333         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7334       }
7335     } else if (Legal->isUniform(Op2)) {
7336       Op2VK = TargetTransformInfo::OK_UniformValue;
7337     }
7338     SmallVector<const Value *, 4> Operands(I->operand_values());
7339     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
7340     return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
7341                                           Op2VK, Op1VP, Op2VP, Operands);
7342   }
7343   case Instruction::Select: {
7344     SelectInst *SI = cast<SelectInst>(I);
7345     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7346     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7347     Type *CondTy = SI->getCondition()->getType();
7348     if (!ScalarCond)
7349       CondTy = VectorType::get(CondTy, VF);
7350 
7351     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
7352   }
7353   case Instruction::ICmp:
7354   case Instruction::FCmp: {
7355     Type *ValTy = I->getOperand(0)->getType();
7356     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7357     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7358       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7359     VectorTy = ToVectorTy(ValTy, VF);
7360     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
7361   }
7362   case Instruction::Store:
7363   case Instruction::Load: {
7364     unsigned Width = VF;
7365     if (Width > 1) {
7366       InstWidening Decision = getWideningDecision(I, Width);
7367       assert(Decision != CM_Unknown &&
7368              "CM decision should be taken at this point");
7369       if (Decision == CM_Scalarize)
7370         Width = 1;
7371     }
7372     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7373     return getMemoryInstructionCost(I, VF);
7374   }
7375   case Instruction::ZExt:
7376   case Instruction::SExt:
7377   case Instruction::FPToUI:
7378   case Instruction::FPToSI:
7379   case Instruction::FPExt:
7380   case Instruction::PtrToInt:
7381   case Instruction::IntToPtr:
7382   case Instruction::SIToFP:
7383   case Instruction::UIToFP:
7384   case Instruction::Trunc:
7385   case Instruction::FPTrunc:
7386   case Instruction::BitCast: {
7387     // We optimize the truncation of induction variables having constant
7388     // integer steps. The cost of these truncations is the same as the scalar
7389     // operation.
7390     if (isOptimizableIVTruncate(I, VF)) {
7391       auto *Trunc = cast<TruncInst>(I);
7392       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7393                                   Trunc->getSrcTy(), Trunc);
7394     }
7395 
7396     Type *SrcScalarTy = I->getOperand(0)->getType();
7397     Type *SrcVecTy =
7398         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7399     if (canTruncateToMinimalBitwidth(I, VF)) {
7400       // This cast is going to be shrunk. This may remove the cast or it might
7401       // turn it into slightly different cast. For example, if MinBW == 16,
7402       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7403       //
7404       // Calculate the modified src and dest types.
7405       Type *MinVecTy = VectorTy;
7406       if (I->getOpcode() == Instruction::Trunc) {
7407         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7408         VectorTy =
7409             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7410       } else if (I->getOpcode() == Instruction::ZExt ||
7411                  I->getOpcode() == Instruction::SExt) {
7412         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7413         VectorTy =
7414             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7415       }
7416     }
7417 
7418     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
7419     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
7420   }
7421   case Instruction::Call: {
7422     bool NeedToScalarize;
7423     CallInst *CI = cast<CallInst>(I);
7424     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
7425     if (getVectorIntrinsicIDForCall(CI, TLI))
7426       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
7427     return CallCost;
7428   }
7429   default:
7430     // The cost of executing VF copies of the scalar instruction. This opcode
7431     // is unknown. Assume that it is the same as 'mul'.
7432     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
7433            getScalarizationOverhead(I, VF, TTI);
7434   } // end of switch.
7435 }
7436 
7437 char LoopVectorize::ID = 0;
7438 static const char lv_name[] = "Loop Vectorization";
7439 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7440 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7441 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7442 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7443 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7444 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7445 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7446 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7447 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7448 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7449 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7450 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7451 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7452 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7453 
7454 namespace llvm {
7455 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
7456   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
7457 }
7458 }
7459 
7460 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7461 
7462   // Check if the pointer operand of a load or store instruction is
7463   // consecutive.
7464   if (auto *Ptr = getPointerOperand(Inst))
7465     return Legal->isConsecutivePtr(Ptr);
7466   return false;
7467 }
7468 
7469 void LoopVectorizationCostModel::collectValuesToIgnore() {
7470   // Ignore ephemeral values.
7471   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7472 
7473   // Ignore type-promoting instructions we identified during reduction
7474   // detection.
7475   for (auto &Reduction : *Legal->getReductionVars()) {
7476     RecurrenceDescriptor &RedDes = Reduction.second;
7477     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7478     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7479   }
7480 }
7481 
7482 LoopVectorizationCostModel::VectorizationFactor
7483 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
7484 
7485   // Width 1 means no vectorize, cost 0 means uncomputed cost.
7486   const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U,
7487                                                                            0U};
7488   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
7489   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
7490     return NoVectorization;
7491 
7492   if (UserVF) {
7493     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7494     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
7495     // Collect the instructions (and their associated costs) that will be more
7496     // profitable to scalarize.
7497     CM.selectUserVectorizationFactor(UserVF);
7498     buildVPlans(UserVF, UserVF);
7499     DEBUG(printPlans(dbgs()));
7500     return {UserVF, 0};
7501   }
7502 
7503   unsigned MaxVF = MaybeMaxVF.getValue();
7504   assert(MaxVF != 0 && "MaxVF is zero.");
7505 
7506   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
7507     // Collect Uniform and Scalar instructions after vectorization with VF.
7508     CM.collectUniformsAndScalars(VF);
7509 
7510     // Collect the instructions (and their associated costs) that will be more
7511     // profitable to scalarize.
7512     if (VF > 1)
7513       CM.collectInstsToScalarize(VF);
7514   }
7515 
7516   buildVPlans(1, MaxVF);
7517   DEBUG(printPlans(dbgs()));
7518   if (MaxVF == 1)
7519     return NoVectorization;
7520 
7521   // Select the optimal vectorization factor.
7522   return CM.selectVectorizationFactor(MaxVF);
7523 }
7524 
7525 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
7526   DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
7527   BestVF = VF;
7528   BestUF = UF;
7529 
7530   for (auto *VPlanIter = VPlans.begin(); VPlanIter != VPlans.end();) {
7531     VPlan *Plan = *VPlanIter;
7532     if (Plan->hasVF(VF))
7533       ++VPlanIter;
7534     else {
7535       VPlanIter = VPlans.erase(VPlanIter);
7536       delete Plan;
7537     }
7538   }
7539   assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
7540 }
7541 
7542 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7543                                            DominatorTree *DT) {
7544   // Perform the actual loop transformation.
7545 
7546   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7547   VPTransformState State{
7548       BestVF, BestUF, LI, DT, ILV.Builder, ILV.VectorLoopValueMap, &ILV};
7549   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7550 
7551   //===------------------------------------------------===//
7552   //
7553   // Notice: any optimization or new instruction that go
7554   // into the code below should also be implemented in
7555   // the cost-model.
7556   //
7557   //===------------------------------------------------===//
7558 
7559   // 2. Copy and widen instructions from the old loop into the new loop.
7560   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7561   VPlan *Plan = *VPlans.begin();
7562   Plan->execute(&State);
7563 
7564   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7565   //    predication, updating analyses.
7566   ILV.fixVectorizedLoop();
7567 }
7568 
7569 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7570     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7571   BasicBlock *Latch = OrigLoop->getLoopLatch();
7572 
7573   // We create new control-flow for the vectorized loop, so the original
7574   // condition will be dead after vectorization if it's only used by the
7575   // branch.
7576   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
7577   if (Cmp && Cmp->hasOneUse())
7578     DeadInstructions.insert(Cmp);
7579 
7580   // We create new "steps" for induction variable updates to which the original
7581   // induction variables map. An original update instruction will be dead if
7582   // all its users except the induction variable are dead.
7583   for (auto &Induction : *Legal->getInductionVars()) {
7584     PHINode *Ind = Induction.first;
7585     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7586     if (all_of(IndUpdate->users(), [&](User *U) -> bool {
7587           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7588         }))
7589       DeadInstructions.insert(IndUpdate);
7590   }
7591 }
7592 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7593 
7594 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7595 
7596 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7597                                         Instruction::BinaryOps BinOp) {
7598   // When unrolling and the VF is 1, we only need to add a simple scalar.
7599   Type *Ty = Val->getType();
7600   assert(!Ty->isVectorTy() && "Val must be a scalar");
7601 
7602   if (Ty->isFloatingPointTy()) {
7603     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7604 
7605     // Floating point operations had to be 'fast' to enable the unrolling.
7606     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7607     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7608   }
7609   Constant *C = ConstantInt::get(Ty, StartIdx);
7610   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7611 }
7612 
7613 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7614   SmallVector<Metadata *, 4> MDs;
7615   // Reserve first location for self reference to the LoopID metadata node.
7616   MDs.push_back(nullptr);
7617   bool IsUnrollMetadata = false;
7618   MDNode *LoopID = L->getLoopID();
7619   if (LoopID) {
7620     // First find existing loop unrolling disable metadata.
7621     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7622       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7623       if (MD) {
7624         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7625         IsUnrollMetadata =
7626             S && S->getString().startswith("llvm.loop.unroll.disable");
7627       }
7628       MDs.push_back(LoopID->getOperand(i));
7629     }
7630   }
7631 
7632   if (!IsUnrollMetadata) {
7633     // Add runtime unroll disable metadata.
7634     LLVMContext &Context = L->getHeader()->getContext();
7635     SmallVector<Metadata *, 1> DisableOperands;
7636     DisableOperands.push_back(
7637         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7638     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7639     MDs.push_back(DisableNode);
7640     MDNode *NewLoopID = MDNode::get(Context, MDs);
7641     // Set operand 0 to refer to the loop id itself.
7642     NewLoopID->replaceOperandWith(0, NewLoopID);
7643     L->setLoopID(NewLoopID);
7644   }
7645 }
7646 
7647 namespace {
7648 /// VPWidenRecipe is a recipe for producing a copy of vector type for each
7649 /// Instruction in its ingredients independently, in order. This recipe covers
7650 /// most of the traditional vectorization cases where each ingredient transforms
7651 /// into a vectorized version of itself.
7652 class VPWidenRecipe : public VPRecipeBase {
7653 private:
7654   /// Hold the ingredients by pointing to their original BasicBlock location.
7655   BasicBlock::iterator Begin;
7656   BasicBlock::iterator End;
7657 
7658 public:
7659   VPWidenRecipe(Instruction *I) : VPRecipeBase(VPWidenSC) {
7660     End = I->getIterator();
7661     Begin = End++;
7662   }
7663 
7664   ~VPWidenRecipe() {}
7665 
7666   /// Method to support type inquiry through isa, cast, and dyn_cast.
7667   static inline bool classof(const VPRecipeBase *V) {
7668     return V->getVPRecipeID() == VPRecipeBase::VPWidenSC;
7669   }
7670 
7671   /// Produce widened copies of all Ingredients.
7672   void execute(VPTransformState &State) override {
7673     for (auto &Instr : make_range(Begin, End))
7674       State.ILV->widenInstruction(Instr);
7675   }
7676 
7677   /// Augment the recipe to include Instr, if it lies at its End.
7678   bool appendInstruction(Instruction *Instr) {
7679     if (End != Instr->getIterator())
7680       return false;
7681     End++;
7682     return true;
7683   }
7684 
7685   /// Print the recipe.
7686   void print(raw_ostream &O, const Twine &Indent) const override {
7687     O << " +\n" << Indent << "\"WIDEN\\l\"";
7688     for (auto &Instr : make_range(Begin, End))
7689       O << " +\n" << Indent << "\"  " << VPlanIngredient(&Instr) << "\\l\"";
7690   }
7691 };
7692 
7693 /// A recipe for handling phi nodes of integer and floating-point inductions,
7694 /// producing their vector and scalar values.
7695 class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
7696 private:
7697   PHINode *IV;
7698   TruncInst *Trunc;
7699 
7700 public:
7701   VPWidenIntOrFpInductionRecipe(PHINode *IV, TruncInst *Trunc = nullptr)
7702       : VPRecipeBase(VPWidenIntOrFpInductionSC), IV(IV), Trunc(Trunc) {}
7703 
7704   ~VPWidenIntOrFpInductionRecipe() {}
7705 
7706   /// Method to support type inquiry through isa, cast, and dyn_cast.
7707   static inline bool classof(const VPRecipeBase *V) {
7708     return V->getVPRecipeID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
7709   }
7710 
7711   /// Generate the vectorized and scalarized versions of the phi node as
7712   /// needed by their users.
7713   void execute(VPTransformState &State) override {
7714     assert(!State.Instance && "Int or FP induction being replicated.");
7715     State.ILV->widenIntOrFpInduction(IV, Trunc);
7716   }
7717 
7718   /// Print the recipe.
7719   void print(raw_ostream &O, const Twine &Indent) const override {
7720     O << " +\n" << Indent << "\"WIDEN-INDUCTION";
7721     if (Trunc) {
7722       O << "\\l\"";
7723       O << " +\n" << Indent << "\"  " << VPlanIngredient(IV) << "\\l\"";
7724       O << " +\n" << Indent << "\"  " << VPlanIngredient(Trunc) << "\\l\"";
7725     } else
7726       O << " " << VPlanIngredient(IV) << "\\l\"";
7727   }
7728 };
7729 
7730 /// A recipe for handling all phi nodes except for integer and FP inductions.
7731 class VPWidenPHIRecipe : public VPRecipeBase {
7732 private:
7733   PHINode *Phi;
7734 
7735 public:
7736   VPWidenPHIRecipe(PHINode *Phi) : VPRecipeBase(VPWidenPHISC), Phi(Phi) {}
7737 
7738   ~VPWidenPHIRecipe() {}
7739 
7740   /// Method to support type inquiry through isa, cast, and dyn_cast.
7741   static inline bool classof(const VPRecipeBase *V) {
7742     return V->getVPRecipeID() == VPRecipeBase::VPWidenPHISC;
7743   }
7744 
7745   /// Generate the phi/select nodes.
7746   void execute(VPTransformState &State) override {
7747     State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7748   }
7749 
7750   /// Print the recipe.
7751   void print(raw_ostream &O, const Twine &Indent) const override {
7752     O << " +\n" << Indent << "\"WIDEN-PHI " << VPlanIngredient(Phi) << "\\l\"";
7753   }
7754 };
7755 
7756 /// VPInterleaveRecipe is a recipe for transforming an interleave group of load
7757 /// or stores into one wide load/store and shuffles.
7758 class VPInterleaveRecipe : public VPRecipeBase {
7759 private:
7760   const InterleaveGroup *IG;
7761 
7762 public:
7763   VPInterleaveRecipe(const InterleaveGroup *IG)
7764       : VPRecipeBase(VPInterleaveSC), IG(IG) {}
7765 
7766   ~VPInterleaveRecipe() {}
7767 
7768   /// Method to support type inquiry through isa, cast, and dyn_cast.
7769   static inline bool classof(const VPRecipeBase *V) {
7770     return V->getVPRecipeID() == VPRecipeBase::VPInterleaveSC;
7771   }
7772 
7773   /// Generate the wide load or store, and shuffles.
7774   void execute(VPTransformState &State) override {
7775     assert(!State.Instance && "Interleave group being replicated.");
7776     State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
7777   }
7778 
7779   /// Print the recipe.
7780   void print(raw_ostream &O, const Twine &Indent) const override;
7781 
7782   const InterleaveGroup *getInterleaveGroup() { return IG; }
7783 };
7784 
7785 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
7786 /// copies of the original scalar type, one per lane, instead of producing a
7787 /// single copy of widened type for all lanes. If the instruction is known to be
7788 /// uniform only one copy, per lane zero, will be generated.
7789 class VPReplicateRecipe : public VPRecipeBase {
7790 private:
7791   /// The instruction being replicated.
7792   Instruction *Ingredient;
7793 
7794   /// Indicator if only a single replica per lane is needed.
7795   bool IsUniform;
7796 
7797   /// Indicator if the replicas are also predicated.
7798   bool IsPredicated;
7799 
7800   /// Indicator if the scalar values should also be packed into a vector.
7801   bool AlsoPack;
7802 
7803 public:
7804   VPReplicateRecipe(Instruction *I, bool IsUniform, bool IsPredicated = false)
7805       : VPRecipeBase(VPReplicateSC), Ingredient(I), IsUniform(IsUniform),
7806         IsPredicated(IsPredicated) {
7807     // Retain the previous behavior of predicateInstructions(), where an
7808     // insert-element of a predicated instruction got hoisted into the
7809     // predicated basic block iff it was its only user. This is achieved by
7810     // having predicated instructions also pack their values into a vector by
7811     // default unless they have a replicated user which uses their scalar value.
7812     AlsoPack = IsPredicated && !I->use_empty();
7813   }
7814 
7815   ~VPReplicateRecipe() {}
7816 
7817   /// Method to support type inquiry through isa, cast, and dyn_cast.
7818   static inline bool classof(const VPRecipeBase *V) {
7819     return V->getVPRecipeID() == VPRecipeBase::VPReplicateSC;
7820   }
7821 
7822   /// Generate replicas of the desired Ingredient. Replicas will be generated
7823   /// for all parts and lanes unless a specific part and lane are specified in
7824   /// the \p State.
7825   void execute(VPTransformState &State) override;
7826 
7827   void setAlsoPack(bool Pack) { AlsoPack = Pack; }
7828 
7829   /// Print the recipe.
7830   void print(raw_ostream &O, const Twine &Indent) const override {
7831     O << " +\n"
7832       << Indent << "\"" << (IsUniform ? "CLONE " : "REPLICATE ")
7833       << VPlanIngredient(Ingredient);
7834     if (AlsoPack)
7835       O << " (S->V)";
7836     O << "\\l\"";
7837   }
7838 };
7839 
7840 /// A recipe for generating conditional branches on the bits of a mask.
7841 class VPBranchOnMaskRecipe : public VPRecipeBase {
7842 private:
7843   /// The input IR basic block used to obtain the mask providing the condition
7844   /// bits for the branch.
7845   BasicBlock *MaskedBasicBlock;
7846 
7847 public:
7848   VPBranchOnMaskRecipe(BasicBlock *BB)
7849       : VPRecipeBase(VPBranchOnMaskSC), MaskedBasicBlock(BB) {}
7850 
7851   /// Method to support type inquiry through isa, cast, and dyn_cast.
7852   static inline bool classof(const VPRecipeBase *V) {
7853     return V->getVPRecipeID() == VPRecipeBase::VPBranchOnMaskSC;
7854   }
7855 
7856   /// Generate the extraction of the appropriate bit from the block mask and the
7857   /// conditional branch.
7858   void execute(VPTransformState &State) override;
7859 
7860   /// Print the recipe.
7861   void print(raw_ostream &O, const Twine &Indent) const override {
7862     O << " +\n"
7863       << Indent << "\"BRANCH-ON-MASK-OF " << MaskedBasicBlock->getName()
7864       << "\\l\"";
7865   }
7866 };
7867 
7868 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
7869 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
7870 /// order to merge values that are set under such a branch and feed their uses.
7871 /// The phi nodes can be scalar or vector depending on the users of the value.
7872 /// This recipe works in concert with VPBranchOnMaskRecipe.
7873 class VPPredInstPHIRecipe : public VPRecipeBase {
7874 private:
7875   Instruction *PredInst;
7876 
7877 public:
7878   /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi
7879   /// nodes after merging back from a Branch-on-Mask.
7880   VPPredInstPHIRecipe(Instruction *PredInst)
7881       : VPRecipeBase(VPPredInstPHISC), PredInst(PredInst) {}
7882 
7883   ~VPPredInstPHIRecipe() {}
7884 
7885   /// Method to support type inquiry through isa, cast, and dyn_cast.
7886   static inline bool classof(const VPRecipeBase *V) {
7887     return V->getVPRecipeID() == VPRecipeBase::VPPredInstPHISC;
7888   }
7889 
7890   /// Generates phi nodes for live-outs as needed to retain SSA form.
7891   void execute(VPTransformState &State) override;
7892 
7893   /// Print the recipe.
7894   void print(raw_ostream &O, const Twine &Indent) const override {
7895     O << " +\n"
7896       << Indent << "\"PHI-PREDICATED-INSTRUCTION " << VPlanIngredient(PredInst)
7897       << "\\l\"";
7898   }
7899 };
7900 } // end anonymous namespace
7901 
7902 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7903     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
7904   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
7905   bool PredicateAtRangeStart = Predicate(Range.Start);
7906 
7907   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
7908     if (Predicate(TmpVF) != PredicateAtRangeStart) {
7909       Range.End = TmpVF;
7910       break;
7911     }
7912 
7913   return PredicateAtRangeStart;
7914 }
7915 
7916 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
7917 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
7918 /// of VF's starting at a given VF and extending it as much as possible. Each
7919 /// vectorization decision can potentially shorten this sub-range during
7920 /// buildVPlan().
7921 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
7922   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7923     VFRange SubRange = {VF, MaxVF + 1};
7924     VPlan *Plan = buildVPlan(SubRange);
7925     VPlans.push_back(Plan);
7926     VF = SubRange.End;
7927   }
7928 }
7929 
7930 VPInterleaveRecipe *
7931 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I,
7932                                                 VFRange &Range) {
7933   const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I);
7934   if (!IG)
7935     return nullptr;
7936 
7937   // Now check if IG is relevant for VF's in the given range.
7938   auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
7939     return [=](unsigned VF) -> bool {
7940       return (VF >= 2 && // Query is illegal for VF == 1
7941               CM.getWideningDecision(I, VF) ==
7942                   LoopVectorizationCostModel::CM_Interleave);
7943     };
7944   };
7945   if (!getDecisionAndClampRange(isIGMember(I), Range))
7946     return nullptr;
7947 
7948   // I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
7949   // range. If it's the primary member of the IG construct a VPInterleaveRecipe.
7950   // Otherwise, it's an adjunct member of the IG, do not construct any Recipe.
7951   assert(I == IG->getInsertPos() &&
7952          "Generating a recipe for an adjunct member of an interleave group");
7953 
7954   return new VPInterleaveRecipe(IG);
7955 }
7956 
7957 VPWidenIntOrFpInductionRecipe *
7958 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I,
7959                                                  VFRange &Range) {
7960   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
7961     // Check if this is an integer or fp induction. If so, build the recipe that
7962     // produces its scalar and vector values.
7963     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
7964     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
7965         II.getKind() == InductionDescriptor::IK_FpInduction)
7966       return new VPWidenIntOrFpInductionRecipe(Phi);
7967 
7968     return nullptr;
7969   }
7970 
7971   // Optimize the special case where the source is a constant integer
7972   // induction variable. Notice that we can only optimize the 'trunc' case
7973   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7974   // (c) other casts depend on pointer size.
7975 
7976   // Determine whether \p K is a truncation based on an induction variable that
7977   // can be optimized.
7978   auto isOptimizableIVTruncate =
7979       [&](Instruction *K) -> std::function<bool(unsigned)> {
7980     return
7981         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
7982   };
7983 
7984   if (isa<TruncInst>(I) &&
7985       getDecisionAndClampRange(isOptimizableIVTruncate(I), Range))
7986     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
7987                                              cast<TruncInst>(I));
7988   return nullptr;
7989 }
7990 
7991 VPWidenRecipe *LoopVectorizationPlanner::tryToWiden(
7992     Instruction *I, VPWidenRecipe *LastWidenRecipe, VFRange &Range) {
7993 
7994   if (Legal->isScalarWithPredication(I))
7995     return nullptr;
7996 
7997   auto IsVectorizableOpcode = [](unsigned Opcode) {
7998     switch (Opcode) {
7999     case Instruction::Add:
8000     case Instruction::And:
8001     case Instruction::AShr:
8002     case Instruction::BitCast:
8003     case Instruction::Br:
8004     case Instruction::Call:
8005     case Instruction::FAdd:
8006     case Instruction::FCmp:
8007     case Instruction::FDiv:
8008     case Instruction::FMul:
8009     case Instruction::FPExt:
8010     case Instruction::FPToSI:
8011     case Instruction::FPToUI:
8012     case Instruction::FPTrunc:
8013     case Instruction::FRem:
8014     case Instruction::FSub:
8015     case Instruction::GetElementPtr:
8016     case Instruction::ICmp:
8017     case Instruction::IntToPtr:
8018     case Instruction::Load:
8019     case Instruction::LShr:
8020     case Instruction::Mul:
8021     case Instruction::Or:
8022     case Instruction::PHI:
8023     case Instruction::PtrToInt:
8024     case Instruction::SDiv:
8025     case Instruction::Select:
8026     case Instruction::SExt:
8027     case Instruction::Shl:
8028     case Instruction::SIToFP:
8029     case Instruction::SRem:
8030     case Instruction::Store:
8031     case Instruction::Sub:
8032     case Instruction::Trunc:
8033     case Instruction::UDiv:
8034     case Instruction::UIToFP:
8035     case Instruction::URem:
8036     case Instruction::Xor:
8037     case Instruction::ZExt:
8038       return true;
8039     }
8040     return false;
8041   };
8042 
8043   if (!IsVectorizableOpcode(I->getOpcode()))
8044     return nullptr;
8045 
8046   if (CallInst *CI = dyn_cast<CallInst>(I)) {
8047     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8048     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8049                ID == Intrinsic::lifetime_start))
8050       return nullptr;
8051   }
8052 
8053   auto willWiden = [&](unsigned VF) -> bool {
8054     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
8055                              CM.isProfitableToScalarize(I, VF)))
8056       return false;
8057     if (CallInst *CI = dyn_cast<CallInst>(I)) {
8058       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8059       // The following case may be scalarized depending on the VF.
8060       // The flag shows whether we use Intrinsic or a usual Call for vectorized
8061       // version of the instruction.
8062       // Is it beneficial to perform intrinsic call compared to lib call?
8063       bool NeedToScalarize;
8064       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
8065       bool UseVectorIntrinsic =
8066           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
8067       return UseVectorIntrinsic || !NeedToScalarize;
8068     }
8069     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
8070       LoopVectorizationCostModel::InstWidening Decision =
8071           CM.getWideningDecision(I, VF);
8072       assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8073              "CM decision should be taken at this point.");
8074       assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
8075              "Interleave memory opportunity should be caught earlier.");
8076       return Decision != LoopVectorizationCostModel::CM_Scalarize;
8077     }
8078     return true;
8079   };
8080 
8081   if (!getDecisionAndClampRange(willWiden, Range))
8082     return nullptr;
8083 
8084   // Success: widen this instruction. We optimize the common case where
8085   // consecutive instructions can be represented by a single recipe.
8086   if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
8087     return LastWidenRecipe;
8088   return new VPWidenRecipe(I);
8089 }
8090 
8091 VPBasicBlock *LoopVectorizationPlanner::handleReplication(
8092     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8093     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe) {
8094 
8095   bool IsUniform = getDecisionAndClampRange(
8096       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
8097       Range);
8098 
8099   bool IsPredicated = Legal->isScalarWithPredication(I);
8100   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
8101 
8102   // Find if I uses a predicated instruction. If so, it will use its scalar
8103   // value. Avoid hoisting the insert-element which packs the scalar value into
8104   // a vector value, as that happens iff all users use the vector value.
8105   for (auto &Op : I->operands())
8106     if (auto *PredInst = dyn_cast<Instruction>(Op))
8107       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8108         PredInst2Recipe[PredInst]->setAlsoPack(false);
8109 
8110   // Finalize the recipe for Instr, first if it is not predicated.
8111   if (!IsPredicated) {
8112     DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8113     VPBB->appendRecipe(Recipe);
8114     return VPBB;
8115   }
8116   DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8117   assert(VPBB->getSuccessors().empty() &&
8118          "VPBB has successors when handling predicated replication.");
8119   // Record predicated instructions for above packing optimizations.
8120   PredInst2Recipe[I] = Recipe;
8121   VPBlockBase *Region = VPBB->setOneSuccessor(createReplicateRegion(I, Recipe));
8122   return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock()));
8123 }
8124 
8125 VPRegionBlock *
8126 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr,
8127                                                 VPRecipeBase *PredRecipe) {
8128   // Instructions marked for predication are replicated and placed under an
8129   // if-then construct to prevent side-effects.
8130 
8131   // Build the triangular if-then region.
8132   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8133   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8134   auto *BOMRecipe = new VPBranchOnMaskRecipe(Instr->getParent());
8135   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8136   auto *PHIRecipe =
8137       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
8138   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8139   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8140   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8141 
8142   // Note: first set Entry as region entry and then connect successors starting
8143   // from it in order, to propagate the "parent" of each VPBasicBlock.
8144   Entry->setTwoSuccessors(Pred, Exit);
8145   Pred->setOneSuccessor(Exit);
8146 
8147   return Region;
8148 }
8149 
8150 VPlan *LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8151 
8152   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8153   DenseMap<Instruction *, Instruction *> SinkAfterInverse;
8154 
8155   // Collect instructions from the original loop that will become trivially dead
8156   // in the vectorized loop. We don't need to vectorize these instructions. For
8157   // example, original induction update instructions can become dead because we
8158   // separately emit induction "steps" when generating code for the new loop.
8159   // Similarly, we create a new latch condition when setting up the structure
8160   // of the new loop, so the old one can become dead.
8161   SmallPtrSet<Instruction *, 4> DeadInstructions;
8162   collectTriviallyDeadInstructions(DeadInstructions);
8163 
8164   // Hold a mapping from predicated instructions to their recipes, in order to
8165   // fix their AlsoPack behavior if a user is determined to replicate and use a
8166   // scalar instead of vector value.
8167   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8168 
8169   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8170   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8171   VPlan *Plan = new VPlan(VPBB);
8172 
8173   // Scan the body of the loop in a topological order to visit each basic block
8174   // after having visited its predecessor basic blocks.
8175   LoopBlocksDFS DFS(OrigLoop);
8176   DFS.perform(LI);
8177 
8178   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8179     // Relevant instructions from basic block BB will be grouped into VPRecipe
8180     // ingredients and fill a new VPBasicBlock.
8181     unsigned VPBBsForBB = 0;
8182     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8183     VPBB->setOneSuccessor(FirstVPBBForBB);
8184     VPBB = FirstVPBBForBB;
8185     VPWidenRecipe *LastWidenRecipe = nullptr;
8186 
8187     std::vector<Instruction *> Ingredients;
8188 
8189     // Organize the ingredients to vectorize from current basic block in the
8190     // right order.
8191     for (Instruction &I : *BB) {
8192       Instruction *Instr = &I;
8193 
8194       // First filter out irrelevant instructions, to ensure no recipes are
8195       // built for them.
8196       if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) ||
8197           DeadInstructions.count(Instr))
8198         continue;
8199 
8200       // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct
8201       // member of the IG, do not construct any Recipe for it.
8202       const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr);
8203       if (IG && Instr != IG->getInsertPos() &&
8204           Range.Start >= 2 && // Query is illegal for VF == 1
8205           CM.getWideningDecision(Instr, Range.Start) ==
8206               LoopVectorizationCostModel::CM_Interleave)
8207         continue;
8208 
8209       // Move instructions to handle first-order recurrences, step 1: avoid
8210       // handling this instruction until after we've handled the instruction it
8211       // should follow.
8212       auto SAIt = SinkAfter.find(Instr);
8213       if (SAIt != SinkAfter.end()) {
8214         DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second
8215                      << " to vectorize a 1st order recurrence.\n");
8216         SinkAfterInverse[SAIt->second] = Instr;
8217         continue;
8218       }
8219 
8220       Ingredients.push_back(Instr);
8221 
8222       // Move instructions to handle first-order recurrences, step 2: push the
8223       // instruction to be sunk at its insertion point.
8224       auto SAInvIt = SinkAfterInverse.find(Instr);
8225       if (SAInvIt != SinkAfterInverse.end())
8226         Ingredients.push_back(SAInvIt->second);
8227     }
8228 
8229     // Introduce each ingredient into VPlan.
8230     for (Instruction *Instr : Ingredients) {
8231       VPRecipeBase *Recipe = nullptr;
8232 
8233       // Check if Instr should belong to an interleave memory recipe, or already
8234       // does. In the latter case Instr is irrelevant.
8235       if ((Recipe = tryToInterleaveMemory(Instr, Range))) {
8236         VPBB->appendRecipe(Recipe);
8237         continue;
8238       }
8239 
8240       // Check if Instr should form some PHI recipe.
8241       if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
8242         VPBB->appendRecipe(Recipe);
8243         continue;
8244       }
8245       if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
8246         VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
8247         continue;
8248       }
8249 
8250       // Check if Instr is to be widened by a general VPWidenRecipe, after
8251       // having first checked for specific widening recipes that deal with
8252       // Interleave Groups, Inductions and Phi nodes.
8253       if ((Recipe = tryToWiden(Instr, LastWidenRecipe, Range))) {
8254         if (Recipe != LastWidenRecipe)
8255           VPBB->appendRecipe(Recipe);
8256         LastWidenRecipe = cast<VPWidenRecipe>(Recipe);
8257         continue;
8258       }
8259 
8260       // Otherwise, if all widening options failed, Instruction is to be
8261       // replicated. This may create a successor for VPBB.
8262       VPBasicBlock *NextVPBB =
8263           handleReplication(Instr, Range, VPBB, PredInst2Recipe);
8264       if (NextVPBB != VPBB) {
8265         VPBB = NextVPBB;
8266         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8267                                     : "");
8268       }
8269     }
8270   }
8271 
8272   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8273   // may also be empty, such as the last one VPBB, reflecting original
8274   // basic-blocks with no recipes.
8275   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8276   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8277   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8278   PreEntry->disconnectSuccessor(Entry);
8279   delete PreEntry;
8280 
8281   std::string PlanName;
8282   raw_string_ostream RSO(PlanName);
8283   unsigned VF = Range.Start;
8284   Plan->addVF(VF);
8285   RSO << "Initial VPlan for VF={" << VF;
8286   for (VF *= 2; VF < Range.End; VF *= 2) {
8287     Plan->addVF(VF);
8288     RSO << "," << VF;
8289   }
8290   RSO << "},UF>=1";
8291   RSO.flush();
8292   Plan->setName(PlanName);
8293 
8294   return Plan;
8295 }
8296 
8297 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
8298   O << " +\n"
8299     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8300   IG->getInsertPos()->printAsOperand(O, false);
8301   O << "\\l\"";
8302   for (unsigned i = 0; i < IG->getFactor(); ++i)
8303     if (Instruction *I = IG->getMember(i))
8304       O << " +\n"
8305         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
8306 }
8307 
8308 void VPReplicateRecipe::execute(VPTransformState &State) {
8309 
8310   if (State.Instance) { // Generate a single instance.
8311     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
8312     // Insert scalar instance packing it into a vector.
8313     if (AlsoPack && State.VF > 1) {
8314       // If we're constructing lane 0, initialize to start from undef.
8315       if (State.Instance->Lane == 0) {
8316         Value *Undef =
8317             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
8318         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
8319       }
8320       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
8321     }
8322     return;
8323   }
8324 
8325   // Generate scalar instances for all VF lanes of all UF parts, unless the
8326   // instruction is uniform inwhich case generate only the first lane for each
8327   // of the UF parts.
8328   unsigned EndLane = IsUniform ? 1 : State.VF;
8329   for (unsigned Part = 0; Part < State.UF; ++Part)
8330     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8331       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
8332 }
8333 
8334 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
8335   assert(State.Instance && "Branch on Mask works only on single instance.");
8336 
8337   unsigned Part = State.Instance->Part;
8338   unsigned Lane = State.Instance->Lane;
8339 
8340   auto Cond = State.ILV->createBlockInMask(MaskedBasicBlock);
8341 
8342   Value *ConditionBit = Cond[Part];
8343   if (!ConditionBit) // Block in mask is all-one.
8344     ConditionBit = State.Builder.getTrue();
8345   else if (ConditionBit->getType()->isVectorTy())
8346     ConditionBit = State.Builder.CreateExtractElement(
8347         ConditionBit, State.Builder.getInt32(Lane));
8348 
8349   // Replace the temporary unreachable terminator with a new conditional branch,
8350   // whose two destinations will be set later when they are created.
8351   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
8352   assert(isa<UnreachableInst>(CurrentTerminator) &&
8353          "Expected to replace unreachable terminator with conditional branch.");
8354   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
8355   CondBr->setSuccessor(0, nullptr);
8356   ReplaceInstWithInst(CurrentTerminator, CondBr);
8357 
8358   DEBUG(dbgs() << "\nLV: vectorizing BranchOnMask recipe "
8359                << MaskedBasicBlock->getName());
8360 }
8361 
8362 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
8363   assert(State.Instance && "Predicated instruction PHI works per instance.");
8364   Instruction *ScalarPredInst = cast<Instruction>(
8365       State.ValueMap.getScalarValue(PredInst, *State.Instance));
8366   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
8367   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
8368   assert(PredicatingBB && "Predicated block has no single predecessor.");
8369 
8370   // By current pack/unpack logic we need to generate only a single phi node: if
8371   // a vector value for the predicated instruction exists at this point it means
8372   // the instruction has vector users only, and a phi for the vector value is
8373   // needed. In this case the recipe of the predicated instruction is marked to
8374   // also do that packing, thereby "hoisting" the insert-element sequence.
8375   // Otherwise, a phi node for the scalar value is needed.
8376   unsigned Part = State.Instance->Part;
8377   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
8378     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
8379     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
8380     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
8381     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
8382     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
8383     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
8384   } else {
8385     Type *PredInstType = PredInst->getType();
8386     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
8387     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
8388     Phi->addIncoming(ScalarPredInst, PredicatedBB);
8389     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
8390   }
8391 }
8392 
8393 bool LoopVectorizePass::processLoop(Loop *L) {
8394   assert(L->empty() && "Only process inner loops.");
8395 
8396 #ifndef NDEBUG
8397   const std::string DebugLocStr = getDebugLocString(L);
8398 #endif /* NDEBUG */
8399 
8400   DEBUG(dbgs() << "\nLV: Checking a loop in \""
8401                << L->getHeader()->getParent()->getName() << "\" from "
8402                << DebugLocStr << "\n");
8403 
8404   LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
8405 
8406   DEBUG(dbgs() << "LV: Loop hints:"
8407                << " force="
8408                << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
8409                        ? "disabled"
8410                        : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
8411                               ? "enabled"
8412                               : "?"))
8413                << " width=" << Hints.getWidth()
8414                << " unroll=" << Hints.getInterleave() << "\n");
8415 
8416   // Function containing loop
8417   Function *F = L->getHeader()->getParent();
8418 
8419   // Looking at the diagnostic output is the only way to determine if a loop
8420   // was vectorized (other than looking at the IR or machine code), so it
8421   // is important to generate an optimization remark for each loop. Most of
8422   // these messages are generated as OptimizationRemarkAnalysis. Remarks
8423   // generated as OptimizationRemark and OptimizationRemarkMissed are
8424   // less verbose reporting vectorized loops and unvectorized loops that may
8425   // benefit from vectorization, respectively.
8426 
8427   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
8428     DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8429     return false;
8430   }
8431 
8432   PredicatedScalarEvolution PSE(*SE, *L);
8433 
8434   // Check if it is legal to vectorize the loop.
8435   LoopVectorizationRequirements Requirements(*ORE);
8436   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
8437                                 &Requirements, &Hints);
8438   if (!LVL.canVectorize()) {
8439     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8440     emitMissedWarning(F, L, Hints, ORE);
8441     return false;
8442   }
8443 
8444   // Check the function attributes to find out if this function should be
8445   // optimized for size.
8446   bool OptForSize =
8447       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
8448 
8449   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8450   // count by optimizing for size, to minimize overheads.
8451   unsigned ExpectedTC = SE->getSmallConstantMaxTripCount(L);
8452   bool HasExpectedTC = (ExpectedTC > 0);
8453 
8454   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
8455     auto EstimatedTC = getLoopEstimatedTripCount(L);
8456     if (EstimatedTC) {
8457       ExpectedTC = *EstimatedTC;
8458       HasExpectedTC = true;
8459     }
8460   }
8461 
8462   if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
8463     DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8464                  << "This loop is worth vectorizing only if no scalar "
8465                  << "iteration overheads are incurred.");
8466     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8467       DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8468     else {
8469       DEBUG(dbgs() << "\n");
8470       // Loops with a very small trip count are considered for vectorization
8471       // under OptForSize, thereby making sure the cost of their loop body is
8472       // dominant, free of runtime guards and scalar iteration overheads.
8473       OptForSize = true;
8474     }
8475   }
8476 
8477   // Check the function attributes to see if implicit floats are allowed.
8478   // FIXME: This check doesn't seem possibly correct -- what if the loop is
8479   // an integer loop and the vector instructions selected are purely integer
8480   // vector instructions?
8481   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8482     DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
8483                     "attribute is used.\n");
8484     ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
8485                                    "NoImplicitFloat", L)
8486               << "loop not vectorized due to NoImplicitFloat attribute");
8487     emitMissedWarning(F, L, Hints, ORE);
8488     return false;
8489   }
8490 
8491   // Check if the target supports potentially unsafe FP vectorization.
8492   // FIXME: Add a check for the type of safety issue (denormal, signaling)
8493   // for the target we're vectorizing for, to make sure none of the
8494   // additional fp-math flags can help.
8495   if (Hints.isPotentiallyUnsafe() &&
8496       TTI->isFPVectorizationPotentiallyUnsafe()) {
8497     DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
8498     ORE->emit(
8499         createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
8500         << "loop not vectorized due to unsafe FP support.");
8501     emitMissedWarning(F, L, Hints, ORE);
8502     return false;
8503   }
8504 
8505   // Use the cost model.
8506   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
8507                                 &Hints);
8508   CM.collectValuesToIgnore();
8509 
8510   // Use the planner for vectorization.
8511   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
8512 
8513   // Get user vectorization factor.
8514   unsigned UserVF = Hints.getWidth();
8515 
8516   // Plan how to best vectorize, return the best VF and its cost.
8517   LoopVectorizationCostModel::VectorizationFactor VF =
8518       LVP.plan(OptForSize, UserVF);
8519 
8520   // Select the interleave count.
8521   unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
8522 
8523   // Get user interleave count.
8524   unsigned UserIC = Hints.getInterleave();
8525 
8526   // Identify the diagnostic messages that should be produced.
8527   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8528   bool VectorizeLoop = true, InterleaveLoop = true;
8529   if (Requirements.doesNotMeet(F, L, Hints)) {
8530     DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
8531                     "requirements.\n");
8532     emitMissedWarning(F, L, Hints, ORE);
8533     return false;
8534   }
8535 
8536   if (VF.Width == 1) {
8537     DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8538     VecDiagMsg = std::make_pair(
8539         "VectorizationNotBeneficial",
8540         "the cost-model indicates that vectorization is not beneficial");
8541     VectorizeLoop = false;
8542   }
8543 
8544   if (IC == 1 && UserIC <= 1) {
8545     // Tell the user interleaving is not beneficial.
8546     DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8547     IntDiagMsg = std::make_pair(
8548         "InterleavingNotBeneficial",
8549         "the cost-model indicates that interleaving is not beneficial");
8550     InterleaveLoop = false;
8551     if (UserIC == 1) {
8552       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8553       IntDiagMsg.second +=
8554           " and is explicitly disabled or interleave count is set to 1";
8555     }
8556   } else if (IC > 1 && UserIC == 1) {
8557     // Tell the user interleaving is beneficial, but it explicitly disabled.
8558     DEBUG(dbgs()
8559           << "LV: Interleaving is beneficial but is explicitly disabled.");
8560     IntDiagMsg = std::make_pair(
8561         "InterleavingBeneficialButDisabled",
8562         "the cost-model indicates that interleaving is beneficial "
8563         "but is explicitly disabled or interleave count is set to 1");
8564     InterleaveLoop = false;
8565   }
8566 
8567   // Override IC if user provided an interleave count.
8568   IC = UserIC > 0 ? UserIC : IC;
8569 
8570   // Emit diagnostic messages, if any.
8571   const char *VAPassName = Hints.vectorizeAnalysisPassName();
8572   if (!VectorizeLoop && !InterleaveLoop) {
8573     // Do not vectorize or interleaving the loop.
8574     ORE->emit(OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
8575                                          L->getStartLoc(), L->getHeader())
8576               << VecDiagMsg.second);
8577     ORE->emit(OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
8578                                          L->getStartLoc(), L->getHeader())
8579               << IntDiagMsg.second);
8580     return false;
8581   } else if (!VectorizeLoop && InterleaveLoop) {
8582     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8583     ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
8584                                          L->getStartLoc(), L->getHeader())
8585               << VecDiagMsg.second);
8586   } else if (VectorizeLoop && !InterleaveLoop) {
8587     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
8588                  << DebugLocStr << '\n');
8589     ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
8590                                          L->getStartLoc(), L->getHeader())
8591               << IntDiagMsg.second);
8592   } else if (VectorizeLoop && InterleaveLoop) {
8593     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
8594                  << DebugLocStr << '\n');
8595     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8596   }
8597 
8598   LVP.setBestPlan(VF.Width, IC);
8599 
8600   using namespace ore;
8601   if (!VectorizeLoop) {
8602     assert(IC > 1 && "interleave count should not be 1 or 0");
8603     // If we decided that it is not legal to vectorize the loop, then
8604     // interleave it.
8605     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
8606                                &CM);
8607     LVP.executePlan(Unroller, DT);
8608 
8609     ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8610                                  L->getHeader())
8611               << "interleaved loop (interleaved count: "
8612               << NV("InterleaveCount", IC) << ")");
8613   } else {
8614     // If we decided that it is *legal* to vectorize the loop, then do it.
8615     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
8616                            &LVL, &CM);
8617     LVP.executePlan(LB, DT);
8618     ++LoopsVectorized;
8619 
8620     // Add metadata to disable runtime unrolling a scalar loop when there are
8621     // no runtime checks about strides and memory. A scalar loop that is
8622     // rarely used is not worth unrolling.
8623     if (!LB.areSafetyChecksAdded())
8624       AddRuntimeUnrollDisableMetaData(L);
8625 
8626     // Report the vectorization decision.
8627     ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
8628                                  L->getHeader())
8629               << "vectorized loop (vectorization width: "
8630               << NV("VectorizationFactor", VF.Width)
8631               << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
8632   }
8633 
8634   // Mark the loop as already vectorized to avoid vectorizing again.
8635   Hints.setAlreadyVectorized();
8636 
8637   DEBUG(verifyFunction(*L->getHeader()->getParent()));
8638   return true;
8639 }
8640 
8641 bool LoopVectorizePass::runImpl(
8642     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
8643     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
8644     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
8645     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
8646     OptimizationRemarkEmitter &ORE_) {
8647 
8648   SE = &SE_;
8649   LI = &LI_;
8650   TTI = &TTI_;
8651   DT = &DT_;
8652   BFI = &BFI_;
8653   TLI = TLI_;
8654   AA = &AA_;
8655   AC = &AC_;
8656   GetLAA = &GetLAA_;
8657   DB = &DB_;
8658   ORE = &ORE_;
8659 
8660   // Don't attempt if
8661   // 1. the target claims to have no vector registers, and
8662   // 2. interleaving won't help ILP.
8663   //
8664   // The second condition is necessary because, even if the target has no
8665   // vector registers, loop vectorization may still enable scalar
8666   // interleaving.
8667   if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
8668     return false;
8669 
8670   bool Changed = false;
8671 
8672   // The vectorizer requires loops to be in simplified form.
8673   // Since simplification may add new inner loops, it has to run before the
8674   // legality and profitability checks. This means running the loop vectorizer
8675   // will simplify all loops, regardless of whether anything end up being
8676   // vectorized.
8677   for (auto &L : *LI)
8678     Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
8679 
8680   // Build up a worklist of inner-loops to vectorize. This is necessary as
8681   // the act of vectorizing or partially unrolling a loop creates new loops
8682   // and can invalidate iterators across the loops.
8683   SmallVector<Loop *, 8> Worklist;
8684 
8685   for (Loop *L : *LI)
8686     addAcyclicInnerLoop(*L, Worklist);
8687 
8688   LoopsAnalyzed += Worklist.size();
8689 
8690   // Now walk the identified inner loops.
8691   while (!Worklist.empty()) {
8692     Loop *L = Worklist.pop_back_val();
8693 
8694     // For the inner loops we actually process, form LCSSA to simplify the
8695     // transform.
8696     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8697 
8698     Changed |= processLoop(L);
8699   }
8700 
8701   // Process each loop nest in the function.
8702   return Changed;
8703 
8704 }
8705 
8706 
8707 PreservedAnalyses LoopVectorizePass::run(Function &F,
8708                                          FunctionAnalysisManager &AM) {
8709     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
8710     auto &LI = AM.getResult<LoopAnalysis>(F);
8711     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
8712     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
8713     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
8714     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
8715     auto &AA = AM.getResult<AAManager>(F);
8716     auto &AC = AM.getResult<AssumptionAnalysis>(F);
8717     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
8718     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8719 
8720     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
8721     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
8722         [&](Loop &L) -> const LoopAccessInfo & {
8723       LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
8724       return LAM.getResult<LoopAccessAnalysis>(L, AR);
8725     };
8726     bool Changed =
8727         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
8728     if (!Changed)
8729       return PreservedAnalyses::all();
8730     PreservedAnalyses PA;
8731     PA.preserve<LoopAnalysis>();
8732     PA.preserve<DominatorTreeAnalysis>();
8733     PA.preserve<BasicAA>();
8734     PA.preserve<GlobalsAA>();
8735     return PA;
8736 }
8737