//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to judge the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
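//
// For example, with a vectorization factor (VF) of 4, a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
//
// is conceptually rewritten (leaving any remainder iterations to a scalar
// epilogue loop) as
//
//   for (i = 0; i + 4 <= n; i += 4)
//     A[i:i+3] = B[i:i+3] + C[i:i+3]; // one wide SIMD operation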
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "VPlan.h"
#include "VPlanBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

} // end anonymous namespace

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
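/// For example, ToVectorTy(i32, 4) yields <4 x i32>, while ToVectorTy(i32, 1)
/// yields i32 itself.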
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of load or store instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
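/// For example, on a typical x86-64 target, x86_fp80 is irregular: its
/// allocation size is 16 bytes but its store size is only 10 bytes, so an
/// array of VF such elements cannot simply be bitcast to <VF x x86_fp80>.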
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
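/// For example, getSignedIntOrFpConstant(i32, -1) returns the i32 constant
/// -1, while for a float type it returns the constant -1.0.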
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  /// Returns true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a scalar instance of \p Instr for the unroll part and
  /// vector lane given by \p Instance.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
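  ///
  /// For example, given a scalarized definition and a vectorized use such as
  ///
  ///   %div = sdiv i32 %x, %y   ; scalarized (e.g., it may need predication)
  ///   %add = add i32 %div, 1   ; vectorized
  ///
  /// the scalar lanes of %div are packed into a vector with an insertelement
  /// sequence when the widened %add is visited.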
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
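  /// For example, with UF = 2 and VF = 4, a scalarized value is held as two
  /// "parts" of four scalar values each.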
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing, we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
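  /// For example, with VF = 4, getStepVector(%val, 0, 2) adds <0, 2, 4, 6>
  /// to %val elementwise.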
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proven that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
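  /// (For example, TripCount = 1003 with VF = 4 and UF = 2 gives
  /// 1003 - 1003 % 8 = 1000.)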
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the smallest and largest indices is always
      // less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata à la
  /// addNewMetadata.  To do that we need to compute the intersection of the
  /// noalias info from all members.
  void addMetadata(Instruction *NewInst) const {
    SmallVector<Value *, 4> VL;
    std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                   [](std::pair<int, Instruction *> p) { return p.second; });
    propagateMetadata(NewInst, VL);
  }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

} // end namespace llvm

namespace {

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, since vectorizing
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// \brief A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return true if reordering is
  /// legal or unnecessary, and false if \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
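///
/// For example, a '#pragma clang loop vectorize_width(4)' annotation reaches
/// the vectorizer as loop metadata along the lines of:
///
///   br i1 %cond, label %header, label %exit, !llvm.loop !0
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}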
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      case HK_ISVECTORIZED:
        return (Val == 0 || Val == 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Already Vectorized.
  Hint IsVectorized;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    if (IsVectorized.Value != 1)
      // If the vectorization width and interleaving count are both 1 then
      // consider the loop to have been already vectorized because there's
      // nothing more that we can do.
      IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the "isvectorized"
  /// hint to 1.
  void setAlreadyVectorized() {
    IsVectorized.Value = 1;
    Hint Hints[] = {IsVectorized};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getIsVectorized() == 1) {
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit([&]() {
        return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or the loop has already been "
                  "vectorized";
      });
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;

    ORE.emit([&]() {
      if (Force.Value == LoopVectorizeHints::FK_Disabled)
        return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled";
      else {
        OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                   TheLoop->getStartLoc(),
                                   TheLoop->getHeader());
        R << "loop not vectorized";
        if (Force.Value == LoopVectorizeHints::FK_Enabled) {
          R << " (Force=" << NV("Force", true);
          if (Width.Value != 0)
            R << ", Vector Width=" << NV("VectorWidth", Width.Value);
          if (Interleave.Value != 0)
            R << ", Interleave Count="
              << NV("InterleaveCount", Interleave.Value);
          R << ")";
        }
        return R;
      }
    });
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When enabling loop hints are provided, we allow the vectorizer to change
    // the order of operations given by the scalar loop. This is not enabled by
    // default because it can be unsafe or inefficient. For example, reordering
    // floating-point operations will change the way round-off error
    // accumulates in the loop.
1365     return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
1366   }
1367 
1368   bool isPotentiallyUnsafe() const {
1369     // Avoid FP vectorization if the target is unsure about proper support.
1370     // This may be related to the SIMD unit in the target not handling
1371     // IEEE 754 FP ops properly, or bad single-to-double promotions.
1372     // Otherwise, a sequence of vectorized loops, even without reduction,
1373     // could lead to different end results on the destination vectors.
1374     return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
1375   }
1376 
1377   void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
1378 
1379 private:
1380   /// Find hints specified in the loop metadata and update local values.
1381   void getHintsFromMetadata() {
1382     MDNode *LoopID = TheLoop->getLoopID();
1383     if (!LoopID)
1384       return;
1385 
1386     // First operand should refer to the loop id itself.
1387     assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
1388     assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
1389 
1390     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1391       const MDString *S = nullptr;
1392       SmallVector<Metadata *, 4> Args;
1393 
      // The expected hint is either an MDString or an MDNode whose first
      // operand is an MDString.
1396       if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (MD->getNumOperands() == 0)
1398           continue;
1399         S = dyn_cast<MDString>(MD->getOperand(0));
1400         for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1401           Args.push_back(MD->getOperand(i));
1402       } else {
1403         S = dyn_cast<MDString>(LoopID->getOperand(i));
1404         assert(Args.size() == 0 && "too many arguments for MDString");
1405       }
1406 
1407       if (!S)
1408         continue;
1409 
      // setHint checks the hint name against the loop metadata prefix and
      // updates the corresponding value.
1411       StringRef Name = S->getString();
1412       if (Args.size() == 1)
1413         setHint(Name, Args[0]);
1414     }
1415   }
1416 
  /// Checks a string hint with one operand and sets the value if valid.
1418   void setHint(StringRef Name, Metadata *Arg) {
1419     if (!Name.startswith(Prefix()))
1420       return;
1421     Name = Name.substr(Prefix().size(), StringRef::npos);
1422 
1423     const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1424     if (!C)
1425       return;
1426     unsigned Val = C->getZExtValue();
1427 
1428     Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
1429     for (auto H : Hints) {
1430       if (Name == H->Name) {
1431         if (H->validate(Val))
1432           H->Value = Val;
1433         else
1434           DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1435         break;
1436       }
1437     }
1438   }
1439 
1440   /// Create a new hint from name / value pair.
1441   MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1442     LLVMContext &Context = TheLoop->getHeader()->getContext();
1443     Metadata *MDs[] = {MDString::get(Context, Name),
1444                        ConstantAsMetadata::get(
1445                            ConstantInt::get(Type::getInt32Ty(Context), V))};
1446     return MDNode::get(Context, MDs);
1447   }
1448 
1449   /// Matches metadata with hint name.
1450   bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1451     MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1452     if (!Name)
1453       return false;
1454 
1455     for (auto H : HintTypes)
1456       if (Name->getString().endswith(H.Name))
1457         return true;
1458     return false;
1459   }
1460 
1461   /// Sets current hints into loop metadata, keeping other values intact.
1462   void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1463     if (HintTypes.empty())
1464       return;
1465 
    // Reserve the first element for the LoopID (set below).
1467     SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, keep the existing operands that are
    // not being updated.
1469     MDNode *LoopID = TheLoop->getLoopID();
1470     if (LoopID) {
1471       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1472         MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore its old value.
1474         if (!matchesHintMetadataName(Node, HintTypes))
1475           MDs.push_back(Node);
1476       }
1477     }
1478 
1479     // Now, add the missing hints.
1480     for (auto H : HintTypes)
1481       MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1482 
1483     // Replace current metadata node with new one.
1484     LLVMContext &Context = TheLoop->getHeader()->getContext();
1485     MDNode *NewLoopID = MDNode::get(Context, MDs);
1486     // Set operand 0 to refer to the loop id itself.
1487     NewLoopID->replaceOperandWith(0, NewLoopID);
1488 
1489     TheLoop->setLoopID(NewLoopID);
1490   }
1491 
1492   /// The loop these hints belong to.
1493   const Loop *TheLoop;
1494 
1495   /// Interface to emit optimization remarks.
1496   OptimizationRemarkEmitter &ORE;
1497 };
1498 
1499 } // end anonymous namespace
1500 
1501 static void emitMissedWarning(Function *F, Loop *L,
1502                               const LoopVectorizeHints &LH,
1503                               OptimizationRemarkEmitter *ORE) {
1504   LH.emitRemarkWithHints();
1505 
1506   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1507     if (LH.getWidth() != 1)
1508       ORE->emit(DiagnosticInfoOptimizationFailure(
1509                     DEBUG_TYPE, "FailedRequestedVectorization",
1510                     L->getStartLoc(), L->getHeader())
1511                 << "loop not vectorized: "
1512                 << "failed explicitly specified loop vectorization");
1513     else if (LH.getInterleave() != 1)
1514       ORE->emit(DiagnosticInfoOptimizationFailure(
1515                     DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
1516                     L->getHeader())
1517                 << "loop not interleaved: "
1518                 << "failed explicitly specified loop interleaving");
1519   }
1520 }
1521 
1522 namespace {
1523 
1524 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1525 /// to what vectorization factor.
1526 /// This class does not look at the profitability of vectorization, only the
1527 /// legality. This class has two main kinds of checks:
1528 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1529 ///   will change the order of memory accesses in a way that will change the
1530 ///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variables and the different reduction variables.
1537 class LoopVectorizationLegality {
1538 public:
1539   LoopVectorizationLegality(
1540       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1541       TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1542       const TargetTransformInfo *TTI,
1543       std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1544       OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1545       LoopVectorizeHints *H)
1546       : TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA),
1547         ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H) {}
1548 
1549   /// ReductionList contains the reduction descriptors for all
1550   /// of the reductions that were found in the loop.
1551   using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>;
1552 
1553   /// InductionList saves induction variables and maps them to the
1554   /// induction descriptor.
1555   using InductionList = MapVector<PHINode *, InductionDescriptor>;
1556 
1557   /// RecurrenceSet contains the phi nodes that are recurrences other than
1558   /// inductions and reductions.
1559   using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;
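
  // For example (a sketch), given:
  //   for (int i = 0; i < n; ++i) {
  //     sum += a[i];
  //     b[i] = last;
  //     last = a[i];
  //   }
  // the phi for 'i' lands in InductionList, the phi for 'sum' in
  // ReductionList, and the phi for 'last' (used before it is redefined) in
  // RecurrenceSet as a first-order recurrence.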
1560 
1561   /// Returns true if it is legal to vectorize this loop.
1562   /// This does not mean that it is profitable to vectorize this
1563   /// loop, only that it is legal to do so.
1564   bool canVectorize();
1565 
1566   /// Returns the primary induction variable.
1567   PHINode *getPrimaryInduction() { return PrimaryInduction; }
1568 
1569   /// Returns the reduction variables found in the loop.
1570   ReductionList *getReductionVars() { return &Reductions; }
1571 
1572   /// Returns the induction variables found in the loop.
1573   InductionList *getInductionVars() { return &Inductions; }
1574 
1575   /// Return the first-order recurrences found in the loop.
1576   RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1577 
1578   /// Return the set of instructions to sink to handle first-order recurrences.
1579   DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
1580 
1581   /// Returns the widest induction type.
1582   Type *getWidestInductionType() { return WidestIndTy; }
1583 
1584   /// Returns True if V is a Phi node of an induction variable in this loop.
1585   bool isInductionPhi(const Value *V);
1586 
1587   /// Returns True if V is a cast that is part of an induction def-use chain,
1588   /// and had been proven to be redundant under a runtime guard (in other
1589   /// words, the cast has the same SCEV expression as the induction phi).
1590   bool isCastedInductionVariable(const Value *V);
1591 
1592   /// Returns True if V can be considered as an induction variable in this
1593   /// loop. V can be the induction phi, or some redundant cast in the def-use
  /// chain of the induction phi.
1595   bool isInductionVariable(const Value *V);
1596 
1597   /// Returns True if PN is a reduction variable in this loop.
1598   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1599 
1600   /// Returns True if Phi is a first-order recurrence in this loop.
1601   bool isFirstOrderRecurrence(const PHINode *Phi);
1602 
1603   /// Return true if the block BB needs to be predicated in order for the loop
1604   /// to be vectorized.
1605   bool blockNeedsPredication(BasicBlock *BB);
1606 
1607   /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
1610   /// This check allows us to vectorize A[idx] into a wide load/store.
1611   /// Returns:
1612   /// 0 - Stride is unknown or non-consecutive.
1613   /// 1 - Address is consecutive.
1614   /// -1 - Address is consecutive, and decreasing.
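  /// For example (a sketch): A[i] yields 1, A[N - i] yields -1, and
  /// A[2 * i] yields 0, where i is the induction variable.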
1615   /// NOTE: This method must only be used before modifying the original scalar
1616   /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
1617   int isConsecutivePtr(Value *Ptr);
1618 
1619   /// Returns true if the value V is uniform within the loop.
1620   bool isUniform(Value *V);
1621 
1622   /// Returns the information that we collected about runtime memory check.
1623   const RuntimePointerChecking *getRuntimePointerChecking() const {
1624     return LAI->getRuntimePointerChecking();
1625   }
1626 
1627   const LoopAccessInfo *getLAI() const { return LAI; }
1628 
1629   /// \brief Check if \p Instr belongs to any interleaved access group.
1630   bool isAccessInterleaved(Instruction *Instr) {
1631     return InterleaveInfo.isInterleaved(Instr);
1632   }
1633 
1634   /// \brief Get the interleaved access group that \p Instr belongs to.
1635   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1636     return InterleaveInfo.getInterleaveGroup(Instr);
1637   }
1638 
1639   /// \brief Returns true if an interleaved group requires a scalar iteration
1640   /// to handle accesses with gaps.
1641   bool requiresScalarEpilogue() const {
1642     return InterleaveInfo.requiresScalarEpilogue();
1643   }
1644 
1645   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1646 
1647   uint64_t getMaxSafeRegisterWidth() const {
    return LAI->getDepChecker().getMaxSafeRegisterWidth();
1649   }
1650 
1651   bool hasStride(Value *V) { return LAI->hasStride(V); }
1652 
1653   /// Returns true if the target machine supports masked store operation
1654   /// for the given \p DataType and kind of access to \p Ptr.
1655   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1656     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1657   }
1658 
1659   /// Returns true if the target machine supports masked load operation
1660   /// for the given \p DataType and kind of access to \p Ptr.
1661   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1662     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1663   }
1664 
1665   /// Returns true if the target machine supports masked scatter operation
1666   /// for the given \p DataType.
1667   bool isLegalMaskedScatter(Type *DataType) {
1668     return TTI->isLegalMaskedScatter(DataType);
1669   }
1670 
1671   /// Returns true if the target machine supports masked gather operation
1672   /// for the given \p DataType.
1673   bool isLegalMaskedGather(Type *DataType) {
1674     return TTI->isLegalMaskedGather(DataType);
1675   }
1676 
1677   /// Returns true if the target machine can represent \p V as a masked gather
1678   /// or scatter operation.
1679   bool isLegalGatherOrScatter(Value *V) {
1680     auto *LI = dyn_cast<LoadInst>(V);
1681     auto *SI = dyn_cast<StoreInst>(V);
1682     if (!LI && !SI)
1683       return false;
1684     auto *Ptr = getPointerOperand(V);
1685     auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1686     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1687   }
1688 
1689   /// Returns true if vector representation of the instruction \p I
1690   /// requires mask.
1691   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1692 
1693   unsigned getNumStores() const { return LAI->getNumStores(); }
1694   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1695   unsigned getNumPredStores() const { return NumPredStores; }
1696 
1697   /// Returns true if \p I is an instruction that will be scalarized with
1698   /// predication. Such instructions include conditional stores and
1699   /// instructions that may divide by zero.
1700   bool isScalarWithPredication(Instruction *I);
1701 
1702   /// Returns true if \p I is a memory instruction with consecutive memory
1703   /// access that can be widened.
1704   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1705 
1706   // Returns true if the NoNaN attribute is set on the function.
1707   bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }
1708 
1709 private:
1710   /// Check if a single basic block loop is vectorizable.
1711   /// At this point we know that this is a loop with a constant trip count
1712   /// and we only need to check individual instructions.
1713   bool canVectorizeInstrs();
1714 
1715   /// When we vectorize loops we may change the order in which
1716   /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
1719   bool canVectorizeMemory();
1720 
1721   /// Return true if we can vectorize this loop using the IF-conversion
1722   /// transformation.
1723   bool canVectorizeWithIfConvert();
1724 
1725   /// Return true if all of the instructions in the block can be speculatively
1726   /// executed. \p SafePtrs is a list of addresses that are known to be legal
1727   /// and we know that we can read from them without segfault.
1728   bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1729 
1730   /// Updates the vectorization state by adding \p Phi to the inductions list.
1731   /// This can set \p Phi as the main induction of the loop if \p Phi is a
1732   /// better choice for the main induction than the existing one.
1733   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1734                        SmallPtrSetImpl<Value *> &AllowedExit);
1735 
1736   /// Create an analysis remark that explains why vectorization failed
1737   ///
1738   /// \p RemarkName is the identifier for the remark.  If \p I is passed it is
1739   /// an instruction that prevents vectorization.  Otherwise the loop is used
1740   /// for the location of the remark.  \return the remark object that can be
1741   /// streamed to.
1742   OptimizationRemarkAnalysis
1743   createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1744     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1745                                   RemarkName, TheLoop, I);
1746   }
1747 
  /// \brief If an access has a symbolic stride, this maps the pointer value to
1749   /// the stride symbol.
1750   const ValueToValueMap *getSymbolicStrides() {
1751     // FIXME: Currently, the set of symbolic strides is sometimes queried before
1752     // it's collected.  This happens from canVectorizeWithIfConvert, when the
1753     // pointer is checked to reference consecutive elements suitable for a
1754     // masked access.
1755     return LAI ? &LAI->getSymbolicStrides() : nullptr;
1756   }
1757 
1758   unsigned NumPredStores = 0;
1759 
1760   /// The loop that we evaluate.
1761   Loop *TheLoop;
1762 
1763   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1764   /// Applies dynamic knowledge to simplify SCEV expressions in the context
1765   /// of existing SCEV assumptions. The analysis will also add a minimal set
1766   /// of new predicates if this is required to enable vectorization and
1767   /// unrolling.
1768   PredicatedScalarEvolution &PSE;
1769 
1770   /// Target Library Info.
1771   TargetLibraryInfo *TLI;
1772 
1773   /// Target Transform Info
1774   const TargetTransformInfo *TTI;
1775 
1776   /// Dominator Tree.
1777   DominatorTree *DT;
1778 
1779   // LoopAccess analysis.
1780   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1781 
1782   // And the loop-accesses info corresponding to this loop.  This pointer is
1783   // null until canVectorizeMemory sets it up.
1784   const LoopAccessInfo *LAI = nullptr;
1785 
1786   /// Interface to emit optimization remarks.
1787   OptimizationRemarkEmitter *ORE;
1788 
1789   /// The interleave access information contains groups of interleaved accesses
1790   /// with the same stride and close to each other.
1791   InterleavedAccessInfo InterleaveInfo;
1792 
1793   //  ---  vectorization state --- //
1794 
1795   /// Holds the primary induction variable. This is the counter of the
1796   /// loop.
1797   PHINode *PrimaryInduction = nullptr;
1798 
1799   /// Holds the reduction variables.
1800   ReductionList Reductions;
1801 
1802   /// Holds all of the induction variables that we found in the loop.
1803   /// Notice that inductions don't need to start at zero and that induction
1804   /// variables can be pointers.
1805   InductionList Inductions;
1806 
1807   /// Holds all the casts that participate in the update chain of the induction
1808   /// variables, and that have been proven to be redundant (possibly under a
1809   /// runtime guard). These casts can be ignored when creating the vectorized
1810   /// loop body.
1811   SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;
1812 
1813   /// Holds the phi nodes that are first-order recurrences.
1814   RecurrenceSet FirstOrderRecurrences;
1815 
1816   /// Holds instructions that need to sink past other instructions to handle
1817   /// first-order recurrences.
1818   DenseMap<Instruction *, Instruction *> SinkAfter;
1819 
1820   /// Holds the widest induction type encountered.
1821   Type *WidestIndTy = nullptr;
1822 
1823   /// Allowed outside users. This holds the induction and reduction
1824   /// vars which can be accessed from outside the loop.
1825   SmallPtrSet<Value *, 4> AllowedExit;
1826 
1827   /// Can we assume the absence of NaNs.
1828   bool HasFunNoNaNAttr = false;
1829 
1830   /// Vectorization requirements that will go through late-evaluation.
1831   LoopVectorizationRequirements *Requirements;
1832 
1833   /// Used to emit an analysis of any legality issues.
1834   LoopVectorizeHints *Hints;
1835 
1836   /// While vectorizing these instructions we have to generate a
1837   /// call to the appropriate masked intrinsic
1838   SmallPtrSet<const Instruction *, 8> MaskedOp;
1839 };
1840 
1841 /// LoopVectorizationCostModel - estimates the expected speedups due to
1842 /// vectorization.
1843 /// In many cases vectorization is not profitable. This can happen because of
1844 /// a number of reasons. In this class we mainly attempt to predict the
1845 /// expected speedup/slowdowns due to the supported instruction set. We use the
1846 /// TargetTransformInfo to query the different backends for the cost of
1847 /// different operations.
1848 class LoopVectorizationCostModel {
1849 public:
1850   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1851                              LoopInfo *LI, LoopVectorizationLegality *Legal,
1852                              const TargetTransformInfo &TTI,
1853                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1854                              AssumptionCache *AC,
1855                              OptimizationRemarkEmitter *ORE, const Function *F,
1856                              const LoopVectorizeHints *Hints)
1857       : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1858         AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}
1859 
1860   /// \return An upper bound for the vectorization factor, or None if
1861   /// vectorization should be avoided up front.
1862   Optional<unsigned> computeMaxVF(bool OptForSize);
1863 
1864   /// Information about vectorization costs
1865   struct VectorizationFactor {
1866     // Vector width with best cost
1867     unsigned Width;
1868 
1869     // Cost of the loop with that width
1870     unsigned Cost;
1871   };
1872 
1873   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If a nonzero UserVF
  /// has been specified, that vectorization factor will be selected, provided
  /// vectorization is possible.
1877   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1878 
1879   /// Setup cost-based decisions for user vectorization factor.
1880   void selectUserVectorizationFactor(unsigned UserVF) {
1881     collectUniformsAndScalars(UserVF);
1882     collectInstsToScalarize(UserVF);
1883   }
1884 
1885   /// \return The size (in bits) of the smallest and widest types in the code
1886   /// that needs to be vectorized. We ignore values that remain scalar such as
1887   /// 64 bit loop indices.
1888   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1889 
1890   /// \return The desired interleave count.
1891   /// If interleave count has been specified by metadata it will be returned.
1892   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1893   /// are the selected vectorization factor and the cost of the selected VF.
1894   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1895                                  unsigned LoopCost);
1896 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function makes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1904   void setCostBasedWideningDecision(unsigned VF);
1905 
1906   /// \brief A struct that represents some properties of the register usage
1907   /// of a loop.
1908   struct RegisterUsage {
1909     /// Holds the number of loop invariant values that are used in the loop.
1910     unsigned LoopInvariantRegs;
1911 
1912     /// Holds the maximum number of concurrent live intervals in the loop.
1913     unsigned MaxLocalUsers;
1914 
1915     /// Holds the number of instructions in the loop.
1916     unsigned NumInstructions;
1917   };
1918 
  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
1921   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1922 
1923   /// Collect values we want to ignore in the cost model.
1924   void collectValuesToIgnore();
1925 
1926   /// \returns The smallest bitwidth each instruction can be represented with.
1927   /// The vector equivalents of these instructions should be truncated to this
1928   /// type.
1929   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1930     return MinBWs;
1931   }
1932 
1933   /// \returns True if it is more profitable to scalarize instruction \p I for
1934   /// vectorization factor \p VF.
1935   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1936     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1937     auto Scalars = InstsToScalarize.find(VF);
1938     assert(Scalars != InstsToScalarize.end() &&
1939            "VF not yet analyzed for scalarization profitability");
1940     return Scalars->second.count(I);
1941   }
1942 
1943   /// Returns true if \p I is known to be uniform after vectorization.
1944   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1945     if (VF == 1)
1946       return true;
1947     assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1948     auto UniformsPerVF = Uniforms.find(VF);
1949     return UniformsPerVF->second.count(I);
1950   }
1951 
1952   /// Returns true if \p I is known to be scalar after vectorization.
1953   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1954     if (VF == 1)
1955       return true;
1956     assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1957     auto ScalarsPerVF = Scalars.find(VF);
1958     return ScalarsPerVF->second.count(I);
1959   }
1960 
1961   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1962   /// for vectorization factor \p VF.
1963   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1964     return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1965            !isScalarAfterVectorization(I, VF);
1966   }
1967 
1968   /// Decision that was taken during cost calculation for memory instruction.
1969   enum InstWidening {
1970     CM_Unknown,
1971     CM_Widen,         // For consecutive accesses with stride +1.
1972     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1973     CM_Interleave,
1974     CM_GatherScatter,
1975     CM_Scalarize
1976   };
1977 
1978   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1979   /// instruction \p I and vector width \p VF.
1980   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1981                            unsigned Cost) {
1982     assert(VF >= 2 && "Expected VF >=2");
1983     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1984   }
1985 
1986   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1987   /// interleaving group \p Grp and vector width \p VF.
1988   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1989                            InstWidening W, unsigned Cost) {
1990     assert(VF >= 2 && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group,
    // but assign the cost to one instruction only.
1993     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1994       if (auto *I = Grp->getMember(i)) {
1995         if (Grp->getInsertPos() == I)
1996           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1997         else
1998           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1999       }
2000     }
2001   }
2002 
2003   /// Return the cost model decision for the given instruction \p I and vector
2004   /// width \p VF. Return CM_Unknown if this instruction did not pass
2005   /// through the cost modeling.
2006   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
2007     assert(VF >= 2 && "Expected VF >=2");
2008     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
2009     auto Itr = WideningDecisions.find(InstOnVF);
2010     if (Itr == WideningDecisions.end())
2011       return CM_Unknown;
2012     return Itr->second.first;
2013   }
2014 
2015   /// Return the vectorization cost for the given instruction \p I and vector
2016   /// width \p VF.
2017   unsigned getWideningCost(Instruction *I, unsigned VF) {
2018     assert(VF >= 2 && "Expected VF >=2");
2019     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
2020     assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
2021     return WideningDecisions[InstOnVF].second;
2022   }
2023 
2024   /// Return True if instruction \p I is an optimizable truncate whose operand
2025   /// is an induction variable. Such a truncate will be removed by adding a new
2026   /// induction variable with the destination type.
2027   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
2028     // If the instruction is not a truncate, return false.
2029     auto *Trunc = dyn_cast<TruncInst>(I);
2030     if (!Trunc)
2031       return false;
2032 
2033     // Get the source and destination types of the truncate.
2034     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
2035     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
2036 
2037     // If the truncate is free for the given types, return false. Replacing a
2038     // free truncate with an induction variable would add an induction variable
2039     // update instruction to each iteration of the loop. We exclude from this
2040     // check the primary induction variable since it will need an update
2041     // instruction regardless.
2042     Value *Op = Trunc->getOperand(0);
2043     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
2044       return false;
2045 
2046     // If the truncated value is not an induction variable, return false.
2047     return Legal->isInductionPhi(Op);
2048   }
2049 
2050   /// Collects the instructions to scalarize for each predicated instruction in
2051   /// the loop.
2052   void collectInstsToScalarize(unsigned VF);
2053 
2054   /// Collect Uniform and Scalar values for the given \p VF.
2055   /// The sets depend on CM decision for Load/Store instructions
2056   /// that may be vectorized as interleave, gather-scatter or scalarized.
2057   void collectUniformsAndScalars(unsigned VF) {
2058     // Do the analysis once.
2059     if (VF == 1 || Uniforms.count(VF))
2060       return;
2061     setCostBasedWideningDecision(VF);
2062     collectLoopUniforms(VF);
2063     collectLoopScalars(VF);
2064   }
2065 
2066 private:
2067   /// \return An upper bound for the vectorization factor, larger than zero.
2068   /// One is returned if vectorization should best be avoided due to cost.
2069   unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);
2070 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
2078   using VectorizationCostTy = std::pair<unsigned, bool>;
2079 
2080   /// Returns the expected execution cost. The unit of the cost does
2081   /// not matter because we use the 'cost' units to compare different
2082   /// vector widths. The cost that is returned is *not* normalized by
2083   /// the factor width.
2084   VectorizationCostTy expectedCost(unsigned VF);
2085 
2086   /// Returns the execution time cost of an instruction for a given vector
2087   /// width. Vector width of one means scalar.
2088   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
2089 
2090   /// The cost-computation logic from getInstructionCost which provides
2091   /// the vector type as an output parameter.
2092   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
2093 
2094   /// Calculate vectorization cost of memory instruction \p I.
2095   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
2096 
2097   /// The cost computation for scalarized memory instruction.
2098   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
2099 
2100   /// The cost computation for interleaving group of memory instructions.
2101   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
2102 
2103   /// The cost computation for Gather/Scatter instruction.
2104   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
2105 
2106   /// The cost computation for widening instruction \p I with consecutive
2107   /// memory access.
2108   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
2109 
2110   /// The cost calculation for Load instruction \p I with uniform pointer -
2111   /// scalar load + broadcast.
2112   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
2113 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
2116   bool isConsecutiveLoadOrStore(Instruction *I);
2117 
2118   /// Create an analysis remark that explains why vectorization failed
2119   ///
2120   /// \p RemarkName is the identifier for the remark.  \return the remark object
2121   /// that can be streamed to.
2122   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
2123     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
2124                                   RemarkName, TheLoop);
2125   }
2126 
2127   /// Map of scalar integer values to the smallest bitwidth they can be legally
2128   /// represented as. The vector equivalents of these values should be truncated
2129   /// to this type.
2130   MapVector<Instruction *, uint64_t> MinBWs;
2131 
2132   /// A type representing the costs for instructions if they were to be
2133   /// scalarized rather than vectorized. The entries are Instruction-Cost
2134   /// pairs.
2135   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
2136 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
2139   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
2140 
2141   /// A map holding scalar costs for different vectorization factors. The
2142   /// presence of a cost for an instruction in the mapping indicates that the
2143   /// instruction will be scalarized when vectorizing with the associated
2144   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
2145   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
2146 
2147   /// Holds the instructions known to be uniform after vectorization.
2148   /// The data is collected per VF.
2149   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
2150 
2151   /// Holds the instructions known to be scalar after vectorization.
2152   /// The data is collected per VF.
2153   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
2154 
2155   /// Holds the instructions (address computations) that are forced to be
2156   /// scalarized.
2157   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
2158 
2159   /// Returns the expected difference in cost from scalarizing the expression
2160   /// feeding a predicated instruction \p PredInst. The instructions to
2161   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
2162   /// non-negative return value implies the expression will be scalarized.
2163   /// Currently, only single-use chains are considered for scalarization.
2164   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
2165                               unsigned VF);
2166 
2167   /// Collect the instructions that are uniform after vectorization. An
2168   /// instruction is uniform if we represent it with a single scalar value in
2169   /// the vectorized loop corresponding to each vector iteration. Examples of
2170   /// uniform instructions include pointer operands of consecutive or
2171   /// interleaved memory accesses. Note that although uniformity implies an
2172   /// instruction will be scalar, the reverse is not true. In general, a
2173   /// scalarized instruction will be represented by VF scalar values in the
2174   /// vectorized loop, each corresponding to an iteration of the original
2175   /// scalar loop.
2176   void collectLoopUniforms(unsigned VF);
2177 
2178   /// Collect the instructions that are scalar after vectorization. An
2179   /// instruction is scalar if it is known to be uniform or will be scalarized
2180   /// during vectorization. Non-uniform scalarized instructions will be
2181   /// represented by VF values in the vectorized loop, each corresponding to an
2182   /// iteration of the original scalar loop.
2183   void collectLoopScalars(unsigned VF);
2184 
2185   /// Keeps cost model vectorization decision and cost for instructions.
2186   /// Right now it is used for memory instructions only.
2187   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
2188                                 std::pair<InstWidening, unsigned>>;
2189 
2190   DecisionList WideningDecisions;
2191 
2192 public:
2193   /// The loop that we evaluate.
2194   Loop *TheLoop;
2195 
2196   /// Predicated scalar evolution analysis.
2197   PredicatedScalarEvolution &PSE;
2198 
2199   /// Loop Info analysis.
2200   LoopInfo *LI;
2201 
2202   /// Vectorization legality.
2203   LoopVectorizationLegality *Legal;
2204 
2205   /// Vector target information.
2206   const TargetTransformInfo &TTI;
2207 
2208   /// Target Library Info.
2209   const TargetLibraryInfo *TLI;
2210 
2211   /// Demanded bits analysis.
2212   DemandedBits *DB;
2213 
2214   /// Assumption cache.
2215   AssumptionCache *AC;
2216 
2217   /// Interface to emit optimization remarks.
2218   OptimizationRemarkEmitter *ORE;
2219 
2220   const Function *TheFunction;
2221 
2222   /// Loop Vectorize Hint.
2223   const LoopVectorizeHints *Hints;
2224 
2225   /// Values to ignore in the cost model.
2226   SmallPtrSet<const Value *, 16> ValuesToIgnore;
2227 
2228   /// Values to ignore in the cost model when VF > 1.
2229   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
2230 };
2231 
2232 } // end anonymous namespace
2233 
2234 namespace llvm {
2235 
/// LoopVectorizationPlanner - drives the vectorization process after having
/// passed Legality checks.
/// The planner builds and optimizes the Vectorization Plans, which record the
/// decisions on how to vectorize the given loop. In particular, the plans
/// represent the control-flow of the vectorized version, the replication of
/// instructions that are to be scalarized, and the interleaved access groups.
2243 class LoopVectorizationPlanner {
2244   /// The loop that we evaluate.
2245   Loop *OrigLoop;
2246 
2247   /// Loop Info analysis.
2248   LoopInfo *LI;
2249 
2250   /// Target Library Info.
2251   const TargetLibraryInfo *TLI;
2252 
2253   /// Target Transform Info.
2254   const TargetTransformInfo *TTI;
2255 
2256   /// The legality analysis.
2257   LoopVectorizationLegality *Legal;
2258 
2259   /// The profitablity analysis.
2260   LoopVectorizationCostModel &CM;
2261 
2262   using VPlanPtr = std::unique_ptr<VPlan>;
2263 
2264   SmallVector<VPlanPtr, 4> VPlans;
2265 
2266   /// This class is used to enable the VPlan to invoke a method of ILV. This is
2267   /// needed until the method is refactored out of ILV and becomes reusable.
2268   struct VPCallbackILV : public VPCallback {
2269     InnerLoopVectorizer &ILV;
2270 
2271     VPCallbackILV(InnerLoopVectorizer &ILV) : ILV(ILV) {}
2272 
2273     Value *getOrCreateVectorValues(Value *V, unsigned Part) override {
2274       return ILV.getOrCreateVectorValue(V, Part);
2275     }
2276   };
2277 
2278   /// A builder used to construct the current plan.
2279   VPBuilder Builder;
2280 
2281   /// When we if-convert we need to create edge masks. We have to cache values
2282   /// so that we don't end up with exponential recursion/IR. Note that
2283   /// if-conversion currently takes place during VPlan-construction, so these
2284   /// caches are only used at that stage.
2285   using EdgeMaskCacheTy =
2286       DenseMap<std::pair<BasicBlock *, BasicBlock *>, VPValue *>;
2287   using BlockMaskCacheTy = DenseMap<BasicBlock *, VPValue *>;
2288   EdgeMaskCacheTy EdgeMaskCache;
2289   BlockMaskCacheTy BlockMaskCache;
2290 
2291   unsigned BestVF = 0;
2292   unsigned BestUF = 0;
2293 
2294 public:
2295   LoopVectorizationPlanner(Loop *L, LoopInfo *LI, const TargetLibraryInfo *TLI,
2296                            const TargetTransformInfo *TTI,
2297                            LoopVectorizationLegality *Legal,
2298                            LoopVectorizationCostModel &CM)
2299       : OrigLoop(L), LI(LI), TLI(TLI), TTI(TTI), Legal(Legal), CM(CM) {}
2300 
2301   /// Plan how to best vectorize, return the best VF and its cost.
2302   LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
2303                                                        unsigned UserVF);
2304 
2305   /// Finalize the best decision and dispose of all other VPlans.
2306   void setBestPlan(unsigned VF, unsigned UF);
2307 
2308   /// Generate the IR code for the body of the vectorized loop according to the
2309   /// best selected VPlan.
2310   void executePlan(InnerLoopVectorizer &LB, DominatorTree *DT);
2311 
2312   void printPlans(raw_ostream &O) {
2313     for (const auto &Plan : VPlans)
2314       O << *Plan;
2315   }
2316 
2317 protected:
2318   /// Collect the instructions from the original loop that would be trivially
2319   /// dead in the vectorized loop if generated.
2320   void collectTriviallyDeadInstructions(
2321       SmallPtrSetImpl<Instruction *> &DeadInstructions);
2322 
2323   /// A range of powers-of-2 vectorization factors with fixed start and
2324   /// adjustable end. The range includes start and excludes end, e.g.,:
2325   /// [1, 9) = {1, 2, 4, 8}
2326   struct VFRange {
2327     // A power of 2.
2328     const unsigned Start;
2329 
    // Need not be a power of 2. If End <= Start, the range is empty.
2331     unsigned End;
2332   };
2333 
2334   /// Test a \p Predicate on a \p Range of VF's. Return the value of applying
2335   /// \p Predicate on Range.Start, possibly decreasing Range.End such that the
2336   /// returned value holds for the entire \p Range.
2337   bool getDecisionAndClampRange(const std::function<bool(unsigned)> &Predicate,
2338                                 VFRange &Range);
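
  // For example (a sketch): with Range = [1, 16) and a Predicate that holds
  // for VF = 1 and VF = 2 but not for VF = 4, Range.End is decreased to 4 and
  // the value at Range.Start (true) is returned.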
2339 
2340   /// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive,
2341   /// according to the information gathered by Legal when it checked if it is
2342   /// legal to vectorize the loop.
2343   void buildVPlans(unsigned MinVF, unsigned MaxVF);
2344 
2345 private:
2346   /// A helper function that computes the predicate of the block BB, assuming
2347   /// that the header block of the loop is set to True. It returns the *entry*
2348   /// mask for the block BB.
2349   VPValue *createBlockInMask(BasicBlock *BB, VPlanPtr &Plan);
2350 
2351   /// A helper function that computes the predicate of the edge between SRC
2352   /// and DST.
2353   VPValue *createEdgeMask(BasicBlock *Src, BasicBlock *Dst, VPlanPtr &Plan);
2354 
  /// Check if \p I belongs to an Interleave Group within the given VF
  /// \p Range, \return true in the first returned value if so and false
  /// otherwise. Build a new VPInterleaveGroup Recipe if \p I is the primary
  /// member of an IG for \p Range.Start, and provide it as the second returned
  /// value. Note that if \p I is an adjunct member of an IG for
  /// \p Range.Start, the \return value is <true, nullptr>, as it is handled by
  /// another recipe.
2361   /// \p Range.End may be decreased to ensure same decision from \p Range.Start
2362   /// to \p Range.End.
2363   VPInterleaveRecipe *tryToInterleaveMemory(Instruction *I, VFRange &Range);
2364 
  // Check if \p I is a memory instruction to be widened for \p Range.Start and
2366   // potentially masked. Such instructions are handled by a recipe that takes an
2367   // additional VPInstruction for the mask.
2368   VPWidenMemoryInstructionRecipe *tryToWidenMemory(Instruction *I,
2369                                                    VFRange &Range,
2370                                                    VPlanPtr &Plan);
2371 
  /// Check if an induction recipe should be constructed for \p I within the
  /// given
2373   /// VF \p Range. If so build and return it. If not, return null. \p Range.End
2374   /// may be decreased to ensure same decision from \p Range.Start to
2375   /// \p Range.End.
2376   VPWidenIntOrFpInductionRecipe *tryToOptimizeInduction(Instruction *I,
2377                                                         VFRange &Range);
2378 
2379   /// Handle non-loop phi nodes. Currently all such phi nodes are turned into
2380   /// a sequence of select instructions as the vectorizer currently performs
2381   /// full if-conversion.
2382   VPBlendRecipe *tryToBlend(Instruction *I, VPlanPtr &Plan);
2383 
2384   /// Check if \p I can be widened within the given VF \p Range. If \p I can be
2385   /// widened for \p Range.Start, check if the last recipe of \p VPBB can be
2386   /// extended to include \p I or else build a new VPWidenRecipe for it and
2387   /// append it to \p VPBB. Return true if \p I can be widened for Range.Start,
2388   /// false otherwise. Range.End may be decreased to ensure same decision from
2389   /// \p Range.Start to \p Range.End.
2390   bool tryToWiden(Instruction *I, VPBasicBlock *VPBB, VFRange &Range);
2391 
2392   /// Build a VPReplicationRecipe for \p I and enclose it within a Region if it
2393   /// is predicated. \return \p VPBB augmented with this new recipe if \p I is
2394   /// not predicated, otherwise \return a new VPBasicBlock that succeeds the new
2395   /// Region. Update the packing decision of predicated instructions if they
2396   /// feed \p I. Range.End may be decreased to ensure same recipe behavior from
2397   /// \p Range.Start to \p Range.End.
2398   VPBasicBlock *handleReplication(
2399       Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
2400       DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
2401       VPlanPtr &Plan);
2402 
2403   /// Create a replicating region for instruction \p I that requires
2404   /// predication. \p PredRecipe is a VPReplicateRecipe holding \p I.
2405   VPRegionBlock *createReplicateRegion(Instruction *I, VPRecipeBase *PredRecipe,
2406                                        VPlanPtr &Plan);
2407 
2408   /// Build a VPlan according to the information gathered by Legal. \return a
2409   /// VPlan for vectorization factors \p Range.Start and up to \p Range.End
2410   /// exclusive, possibly decreasing \p Range.End.
  VPlanPtr buildVPlan(VFRange &Range,
                      const SmallPtrSetImpl<Value *> &NeedDef);
2413 };
2414 
2415 } // end namespace llvm
2416 
2417 namespace {
2418 
2419 /// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legality and the cost model. Once
2421 /// vectorization has been determined to be possible and profitable the
2422 /// requirements can be verified by looking for metadata or compiler options.
2423 /// For example, some loops require FP commutativity which is only allowed if
2424 /// vectorization is explicitly specified or if the fast-math compiler option
2425 /// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
2431 class LoopVectorizationRequirements {
2432 public:
2433   LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) : ORE(ORE) {}
2434 
2435   void addUnsafeAlgebraInst(Instruction *I) {
2436     // First unsafe algebra instruction.
2437     if (!UnsafeAlgebraInst)
2438       UnsafeAlgebraInst = I;
2439   }
2440 
2441   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2442 
2443   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2444     const char *PassName = Hints.vectorizeAnalysisPassName();
2445     bool Failed = false;
2446     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
2447       ORE.emit([&]() {
2448         return OptimizationRemarkAnalysisFPCommute(
2449                    PassName, "CantReorderFPOps",
2450                    UnsafeAlgebraInst->getDebugLoc(),
2451                    UnsafeAlgebraInst->getParent())
2452                << "loop not vectorized: cannot prove it is safe to reorder "
2453                   "floating-point operations";
2454       });
2455       Failed = true;
2456     }
2457 
2458     // Test if runtime memcheck thresholds are exceeded.
2459     bool PragmaThresholdReached =
2460         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
2461     bool ThresholdReached =
2462         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
2463     if ((ThresholdReached && !Hints.allowReordering()) ||
2464         PragmaThresholdReached) {
2465       ORE.emit([&]() {
2466         return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
2467                                                   L->getStartLoc(),
2468                                                   L->getHeader())
2469                << "loop not vectorized: cannot prove it is safe to reorder "
2470                   "memory operations";
2471       });
2472       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
2473       Failed = true;
2474     }
2475 
2476     return Failed;
2477   }
2478 
2479 private:
2480   unsigned NumRuntimePointerChecks = 0;
2481   Instruction *UnsafeAlgebraInst = nullptr;
2482 
2483   /// Interface to emit optimization remarks.
2484   OptimizationRemarkEmitter &ORE;
2485 };
2486 
2487 } // end anonymous namespace
2488 
2489 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
2490   if (L.empty()) {
2491     if (!hasCyclesInLoopBody(L))
2492       V.push_back(&L);
2493     return;
2494   }
2495   for (Loop *InnerL : L)
2496     addAcyclicInnerLoop(*InnerL, V);
2497 }
2498 
2499 namespace {
2500 
2501 /// The LoopVectorize Pass.
2502 struct LoopVectorize : public FunctionPass {
2503   /// Pass identification, replacement for typeid
2504   static char ID;
2505 
2506   LoopVectorizePass Impl;
2507 
2508   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
2509       : FunctionPass(ID) {
2510     Impl.DisableUnrolling = NoUnrolling;
2511     Impl.AlwaysVectorize = AlwaysVectorize;
2512     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2513   }
2514 
2515   bool runOnFunction(Function &F) override {
2516     if (skipFunction(F))
2517       return false;
2518 
2519     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2520     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2521     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2522     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2523     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2524     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2525     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
2526     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2527     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2528     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2529     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2530     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2531 
2532     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2533         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2534 
2535     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2536                         GetLAA, *ORE);
2537   }
2538 
2539   void getAnalysisUsage(AnalysisUsage &AU) const override {
2540     AU.addRequired<AssumptionCacheTracker>();
2541     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2542     AU.addRequired<DominatorTreeWrapperPass>();
2543     AU.addRequired<LoopInfoWrapperPass>();
2544     AU.addRequired<ScalarEvolutionWrapperPass>();
2545     AU.addRequired<TargetTransformInfoWrapperPass>();
2546     AU.addRequired<AAResultsWrapperPass>();
2547     AU.addRequired<LoopAccessLegacyAnalysis>();
2548     AU.addRequired<DemandedBitsWrapperPass>();
2549     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2550     AU.addPreserved<LoopInfoWrapperPass>();
2551     AU.addPreserved<DominatorTreeWrapperPass>();
2552     AU.addPreserved<BasicAAWrapperPass>();
2553     AU.addPreserved<GlobalsAAWrapperPass>();
2554   }
2555 };
2556 
2557 } // end anonymous namespace
2558 
2559 //===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
2562 //===----------------------------------------------------------------------===//
2563 
2564 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2565   // We need to place the broadcast of invariant variables outside the loop.
2566   Instruction *Instr = dyn_cast<Instruction>(V);
2567   bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
2568   bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2569 
2570   // Place the code for broadcasting invariant variables in the new preheader.
2571   IRBuilder<>::InsertPointGuard Guard(Builder);
2572   if (Invariant)
2573     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2574 
2575   // Broadcast the scalar into all locations in the vector.
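  // For example, with VF = 4 and an i32 scalar %v, CreateVectorSplat emits
  // (roughly):
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                      <4 x i32> undef, <4 x i32> zeroinitializer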
2576   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2577 
2578   return Shuf;
2579 }
2580 
2581 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2582     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
2583   Value *Start = II.getStartValue();
2584 
  // Construct the initial value of the vector IV in the vector loop preheader.
2586   auto CurrIP = Builder.saveIP();
2587   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2588   if (isa<TruncInst>(EntryVal)) {
2589     assert(Start->getType()->isIntegerTy() &&
2590            "Truncation requires an integer type");
2591     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2592     Step = Builder.CreateTrunc(Step, TruncType);
2593     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2594   }
2595   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2596   Value *SteppedStart =
2597       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2598 
2599   // We create vector phi nodes for both integer and floating-point induction
2600   // variables. Here, we determine the kind of arithmetic we will perform.
2601   Instruction::BinaryOps AddOp;
2602   Instruction::BinaryOps MulOp;
2603   if (Step->getType()->isIntegerTy()) {
2604     AddOp = Instruction::Add;
2605     MulOp = Instruction::Mul;
2606   } else {
2607     AddOp = II.getInductionOpcode();
2608     MulOp = Instruction::FMul;
2609   }
2610 
2611   // Multiply the vectorization factor by the step using integer or
2612   // floating-point arithmetic as appropriate.
2613   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
2614   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
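  // For example, with VF = 4 and an integer step of 2, Mul is 8 and the splat
  // below is <8, 8, 8, 8>, the amount the vector IV advances per unroll part.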
2615 
2616   // Create a vector splat to use in the induction update.
2617   //
2618   // FIXME: If the step is non-constant, we create the vector splat with
2619   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2620   //        handle a constant vector splat.
2621   Value *SplatVF = isa<Constant>(Mul)
2622                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2623                        : Builder.CreateVectorSplat(VF, Mul);
2624   Builder.restoreIP(CurrIP);
2625 
2626   // We may need to add the step a number of times, depending on the unroll
2627   // factor. The last of those goes into the PHI.
2628   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2629                                     &*LoopVectorBody->getFirstInsertionPt());
2630   Instruction *LastInduction = VecInd;
2631   for (unsigned Part = 0; Part < UF; ++Part) {
2632     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
2633 
2634     if (isa<TruncInst>(EntryVal))
2635       addMetadata(LastInduction, EntryVal);
2636     else
2637       recordVectorLoopValueForInductionCast(II, LastInduction, Part);
2638 
2639     LastInduction = cast<Instruction>(addFastMathFlag(
2640         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2641   }
2642 
2643   // Move the last step to the end of the latch block. This ensures consistent
2644   // placement of all induction updates.
2645   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2646   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2647   auto *ICmp = cast<Instruction>(Br->getCondition());
2648   LastInduction->moveBefore(ICmp);
2649   LastInduction->setName("vec.ind.next");
2650 
2651   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2652   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2653 }
2654 
2655 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2656   return Cost->isScalarAfterVectorization(I, VF) ||
2657          Cost->isProfitableToScalarize(I, VF);
2658 }
2659 
2660 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2661   if (shouldScalarizeInstruction(IV))
2662     return true;
2663   auto isScalarInst = [&](User *U) -> bool {
2664     auto *I = cast<Instruction>(U);
2665     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2666   };
2667   return llvm::any_of(IV->users(), isScalarInst);
2668 }
2669 
2670 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2671     const InductionDescriptor &ID, Value *VectorLoopVal, unsigned Part,
2672     unsigned Lane) {
2673   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2674   if (Casts.empty())
2675     return;
  // Only the first Cast instruction in the Casts vector is of interest. The
  // rest of the Casts (if any) have no uses outside the induction update chain
  // itself.
2679   Instruction *CastInst = *Casts.begin();
2680   if (Lane < UINT_MAX)
2681     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
2682   else
2683     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
2684 }
2685 
2686 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
2687   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2688          "Primary induction variable must have an integer type");
2689 
2690   auto II = Legal->getInductionVars()->find(IV);
2691   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
2692 
2693   auto ID = II->second;
2694   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2695 
2696   // The scalar value to broadcast. This will be derived from the canonical
2697   // induction variable.
2698   Value *ScalarIV = nullptr;
2699 
2700   // The value from the original loop to which we are mapping the new induction
2701   // variable.
2702   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2703 
2704   // True if we have vectorized the induction variable.
2705   auto VectorizedIV = false;
2706 
2707   // Determine if we want a scalar version of the induction variable. This is
2708   // true if the induction variable itself is not widened, or if it has at
2709   // least one user in the loop that is not widened.
2710   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
2711 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2714   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
2715          "Induction step should be loop invariant");
2716   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2717   Value *Step = nullptr;
2718   if (PSE.getSE()->isSCEVable(IV->getType())) {
2719     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2720     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
2721                              LoopVectorPreHeader->getTerminator());
2722   } else {
2723     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
2724   }
2725 
2726   // Try to create a new independent vector induction variable. If we can't
2727   // create the phi node, we will splat the scalar induction variable in each
2728   // loop iteration.
2729   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
2730     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2731     VectorizedIV = true;
2732   }
2733 
2734   // If we haven't yet vectorized the induction variable, or if we will create
2735   // a scalar one, we need to define the scalar induction variable and step
2736   // values. If we were given a truncation type, truncate the canonical
2737   // induction variable and step. Otherwise, derive these values from the
2738   // induction descriptor.
2739   if (!VectorizedIV || NeedsScalarIV) {
2740     ScalarIV = Induction;
2741     if (IV != OldInduction) {
2742       ScalarIV = IV->getType()->isIntegerTy()
2743                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2744                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2745                                           IV->getType());
2746       ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
2747       ScalarIV->setName("offset.idx");
2748     }
2749     if (Trunc) {
2750       auto *TruncType = cast<IntegerType>(Trunc->getType());
2751       assert(Step->getType()->isIntegerTy() &&
2752              "Truncation requires an integer step");
2753       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2754       Step = Builder.CreateTrunc(Step, TruncType);
2755     }
2756   }
2757 
2758   // If we haven't yet vectorized the induction variable, splat the scalar
2759   // induction variable, and build the necessary step vectors.
2760   // TODO: Don't do it unless the vectorized IV is really required.
2761   if (!VectorizedIV) {
2762     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2763     for (unsigned Part = 0; Part < UF; ++Part) {
2764       Value *EntryPart =
2765           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
2766       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
2767       if (Trunc)
2768         addMetadata(EntryPart, Trunc);
2769       else
2770         recordVectorLoopValueForInductionCast(ID, EntryPart, Part);
2771     }
2772   }
2773 
2774   // If an induction variable is only used for counting loop iterations or
2775   // calculating addresses, it doesn't need to be widened. Create scalar steps
2776   // that can be used by instructions we will later scalarize. Note that the
2777   // addition of the scalar steps will not increase the number of instructions
2778   // in the loop in the common case prior to InstCombine. We will be trading
2779   // one vector extract for each scalar step.
2780   if (NeedsScalarIV)
2781     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
2782 }
2783 
2784 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2785                                           Instruction::BinaryOps BinOp) {
2786   // Create and check the types.
2787   assert(Val->getType()->isVectorTy() && "Must be a vector");
2788   int VLen = Val->getType()->getVectorNumElements();
2789 
2790   Type *STy = Val->getType()->getScalarType();
2791   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2792          "Induction Step must be an integer or FP");
2793   assert(Step->getType() == STy && "Step has wrong type");
2794 
2795   SmallVector<Constant *, 8> Indices;
2796 
2797   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
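    // For example, with VLen = 4, StartIdx = 4, and Step = 2, we build
    // Cv = <4, 5, 6, 7> below and compute Val + <4, 5, 6, 7> * 2, i.e.
    // Val + <8, 10, 12, 14>.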
2799     for (int i = 0; i < VLen; ++i)
2800       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2801 
2802     // Add the consecutive indices to the vector value.
2803     Constant *Cv = ConstantVector::get(Indices);
2804     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2805     Step = Builder.CreateVectorSplat(VLen, Step);
2806     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
2809     Step = Builder.CreateMul(Cv, Step);
2810     return Builder.CreateAdd(Val, Step, "induction");
2811   }
2812 
2813   // Floating point induction.
2814   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2815          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers starting at StartIdx.
2817   for (int i = 0; i < VLen; ++i)
2818     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2819 
2820   // Add the consecutive indices to the vector value.
2821   Constant *Cv = ConstantVector::get(Indices);
2822 
2823   Step = Builder.CreateVectorSplat(VLen, Step);
2824 
2825   // Floating point operations had to be 'fast' to enable the induction.
2826   FastMathFlags Flags;
2827   Flags.setFast();
2828 
2829   Value *MulOp = Builder.CreateFMul(Cv, Step);
2830   if (isa<Instruction>(MulOp))
    // Have to check: MulOp may have been folded to a constant.
2832     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2833 
2834   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2835   if (isa<Instruction>(BOp))
2836     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2837   return BOp;
2838 }
2839 
2840 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2841                                            Value *EntryVal,
2842                                            const InductionDescriptor &ID) {
2843   // We shouldn't have to build scalar steps if we aren't vectorizing.
2844   assert(VF > 1 && "VF should be greater than one");
2845 
2846   // Get the value type and ensure it and the step have the same integer type.
2847   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2848   assert(ScalarIVTy == Step->getType() &&
2849          "Val and Step should have the same type");
2850 
2851   // We build scalar steps for both integer and floating-point induction
2852   // variables. Here, we determine the kind of arithmetic we will perform.
2853   Instruction::BinaryOps AddOp;
2854   Instruction::BinaryOps MulOp;
2855   if (ScalarIVTy->isIntegerTy()) {
2856     AddOp = Instruction::Add;
2857     MulOp = Instruction::Mul;
2858   } else {
2859     AddOp = ID.getInductionOpcode();
2860     MulOp = Instruction::FMul;
2861   }
2862 
2863   // Determine the number of scalars we need to generate for each unroll
2864   // iteration. If EntryVal is uniform, we only need to generate the first
2865   // lane. Otherwise, we generate all VF values.
2866   unsigned Lanes =
2867       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2868                                                                          : VF;
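  // For example, with VF = 4 and UF = 2, the scalar steps computed below for
  // a non-uniform EntryVal are ScalarIV + {0, 1, 2, 3} * Step for part 0 and
  // ScalarIV + {4, 5, 6, 7} * Step for part 1.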
2869   // Compute the scalar steps and save the results in VectorLoopValueMap.
2870   for (unsigned Part = 0; Part < UF; ++Part) {
2871     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2872       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2873       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2874       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2875       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2876       recordVectorLoopValueForInductionCast(ID, Add, Part, Lane);
2877     }
2878   }
2879 }
2880 
int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();
2884 
2885   int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
2886   if (Stride == 1 || Stride == -1)
2887     return Stride;
2888   return 0;
2889 }
2890 
2891 bool LoopVectorizationLegality::isUniform(Value *V) {
2892   return LAI->isUniform(V);
2893 }
2894 
2895 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2896   assert(V != Induction && "The new induction variable should not be used.");
2897   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2898   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2899 
  // If we have a stride that is replaced by one, replace V with the constant
  // one here.
2901   if (Legal->hasStride(V))
2902     V = ConstantInt::get(V->getType(), 1);
2903 
2904   // If we have a vector mapped to this value, return it.
2905   if (VectorLoopValueMap.hasVectorValue(V, Part))
2906     return VectorLoopValueMap.getVectorValue(V, Part);
2907 
2908   // If the value has not been vectorized, check if it has been scalarized
2909   // instead. If it has been scalarized, and we actually need the value in
2910   // vector form, we will construct the vector values on demand.
2911   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2912     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2913 
2914     // If we've scalarized a value, that value should be an instruction.
2915     auto *I = cast<Instruction>(V);
2916 
2917     // If we aren't vectorizing, we can just copy the scalar map values over to
2918     // the vector map.
2919     if (VF == 1) {
2920       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2921       return ScalarValue;
2922     }
2923 
2924     // Get the last scalar instruction we generated for V and Part. If the value
2925     // is known to be uniform after vectorization, this corresponds to lane zero
2926     // of the Part unroll iteration. Otherwise, the last instruction is the one
2927     // we created for the last vector lane of the Part unroll iteration.
2928     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2929     auto *LastInst = cast<Instruction>(
2930         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2931 
2932     // Set the insert point after the last scalarized instruction. This ensures
2933     // the insertelement sequence will directly follow the scalar definitions.
2934     auto OldIP = Builder.saveIP();
2935     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2936     Builder.SetInsertPoint(&*NewIP);
2937 
2938     // However, if we are vectorizing, we need to construct the vector values.
2939     // If the value is known to be uniform after vectorization, we can just
2940     // broadcast the scalar value corresponding to lane zero for each unroll
2941     // iteration. Otherwise, we construct the vector values using insertelement
2942     // instructions. Since the resulting vectors are stored in
2943     // VectorLoopValueMap, we will only generate the insertelements once.
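    // For example, with VF = 4, the non-uniform case below emits four
    // insertelement instructions, one per lane, each feeding the next.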
2944     Value *VectorValue = nullptr;
2945     if (Cost->isUniformAfterVectorization(I, VF)) {
2946       VectorValue = getBroadcastInstrs(ScalarValue);
2947       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2948     } else {
2949       // Initialize packing with insertelements to start from undef.
2950       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2951       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2952       for (unsigned Lane = 0; Lane < VF; ++Lane)
2953         packScalarIntoVectorValue(V, {Part, Lane});
2954       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2955     }
2956     Builder.restoreIP(OldIP);
2957     return VectorValue;
2958   }
2959 
2960   // If this scalar is unknown, assume that it is a constant or that it is
2961   // loop invariant. Broadcast V and save the value for future uses.
2962   Value *B = getBroadcastInstrs(V);
2963   VectorLoopValueMap.setVectorValue(V, Part, B);
2964   return B;
2965 }
2966 
2967 Value *
2968 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2969                                             const VPIteration &Instance) {
2970   // If the value is not an instruction contained in the loop, it should
2971   // already be scalar.
2972   if (OrigLoop->isLoopInvariant(V))
2973     return V;
2974 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2978 
2979   // If the value from the original loop has not been vectorized, it is
2980   // represented by UF x VF scalar values in the new loop. Return the requested
2981   // scalar value.
2982   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2983     return VectorLoopValueMap.getScalarValue(V, Instance);
2984 
2985   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2986   // for the given unroll part. If this entry is not a vector type (i.e., the
2987   // vectorization factor is one), there is no need to generate an
2988   // extractelement instruction.
2989   auto *U = getOrCreateVectorValue(V, Instance.Part);
2990   if (!U->getType()->isVectorTy()) {
2991     assert(VF == 1 && "Value not scalarized has non-vector type");
2992     return U;
2993   }
2994 
2995   // Otherwise, the value from the original loop has been vectorized and is
2996   // represented by UF vector values. Extract and return the requested scalar
2997   // value from the appropriate vector lane.
2998   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2999 }
3000 
3001 void InnerLoopVectorizer::packScalarIntoVectorValue(
3002     Value *V, const VPIteration &Instance) {
3003   assert(V != Induction && "The new induction variable should not be used.");
3004   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
3005   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
3006 
3007   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
3008   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
3009   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
3010                                             Builder.getInt32(Instance.Lane));
3011   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
3012 }
3013 
3014 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
3015   assert(Vec->getType()->isVectorTy() && "Invalid type");
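  // Build a shuffle mask that reverses the lane order; e.g. for VF = 4 the
  // mask is <3, 2, 1, 0>.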
3016   SmallVector<Constant *, 8> ShuffleMask;
3017   for (unsigned i = 0; i < VF; ++i)
3018     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
3019 
3020   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
3021                                      ConstantVector::get(ShuffleMask),
3022                                      "reverse");
3023 }
3024 
3025 // Try to vectorize the interleave group that \p Instr belongs to.
3026 //
// E.g. Translate the following interleaved load group (factor = 3):
3028 //   for (i = 0; i < N; i+=3) {
3029 //     R = Pic[i];             // Member of index 0
3030 //     G = Pic[i+1];           // Member of index 1
3031 //     B = Pic[i+2];           // Member of index 2
3032 //     ... // do something to R, G, B
3033 //   }
3034 // To:
3035 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
3036 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
3037 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
3038 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
3039 //
// Or translate the following interleaved store group (factor = 3):
3041 //   for (i = 0; i < N; i+=3) {
3042 //     ... do something to R, G, B
3043 //     Pic[i]   = R;           // Member of index 0
3044 //     Pic[i+1] = G;           // Member of index 1
3045 //     Pic[i+2] = B;           // Member of index 2
3046 //   }
3047 // To:
3048 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
3049 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
3050 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
3051 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
3052 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
3053 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
3054   const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");
3056 
3057   // Skip if current instruction is not the insert position.
3058   if (Instr != Group->getInsertPos())
3059     return;
3060 
3061   const DataLayout &DL = Instr->getModule()->getDataLayout();
3062   Value *Ptr = getPointerOperand(Instr);
3063 
3064   // Prepare for the vector type of the interleaved load/store.
3065   Type *ScalarTy = getMemInstValueType(Instr);
3066   unsigned InterleaveFactor = Group->getFactor();
3067   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
3068   Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
3069 
3070   // Prepare for the new pointers.
3071   setDebugLocFromInst(Builder, Ptr);
3072   SmallVector<Value *, 2> NewPtrs;
3073   unsigned Index = Group->getIndex(Instr);
3074 
3075   // If the group is reverse, adjust the index to refer to the last vector lane
3076   // instead of the first. We adjust the index from the first vector lane,
3077   // rather than directly getting the pointer for lane VF - 1, because the
3078   // pointer operand of the interleaved access is supposed to be uniform. For
3079   // uniform instructions, we're only required to generate a value for the
3080   // first vector lane in each unroll iteration.
3081   if (Group->isReverse())
3082     Index += (VF - 1) * Group->getFactor();
3083 
3084   for (unsigned Part = 0; Part < UF; Part++) {
3085     Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
3086 
    // Note that the current instruction may be at any index in the group. We
    // need to adjust the address down to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
3098     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
3099 
3100     // Cast to the vector pointer type.
3101     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
3102   }
3103 
3104   setDebugLocFromInst(Builder, Instr);
3105   Value *UndefVec = UndefValue::get(VecTy);
3106 
3107   // Vectorize the interleaved load group.
3108   if (isa<LoadInst>(Instr)) {
3109     // For each unroll part, create a wide load for the group.
3110     SmallVector<Value *, 2> NewLoads;
3111     for (unsigned Part = 0; Part < UF; Part++) {
3112       auto *NewLoad = Builder.CreateAlignedLoad(
3113           NewPtrs[Part], Group->getAlignment(), "wide.vec");
3114       Group->addMetadata(NewLoad);
3115       NewLoads.push_back(NewLoad);
3116     }
3117 
3118     // For each member in the group, shuffle out the appropriate data from the
3119     // wide loads.
3120     for (unsigned I = 0; I < InterleaveFactor; ++I) {
3121       Instruction *Member = Group->getMember(I);
3122 
3123       // Skip the gaps in the group.
3124       if (!Member)
3125         continue;
3126 
3127       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
3128       for (unsigned Part = 0; Part < UF; Part++) {
3129         Value *StridedVec = Builder.CreateShuffleVector(
3130             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
3131 
        // If this member has a different type, cast the result to that type.
3133         if (Member->getType() != ScalarTy) {
3134           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
3135           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
3136         }
3137 
3138         if (Group->isReverse())
3139           StridedVec = reverseVector(StridedVec);
3140 
3141         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
3142       }
3143     }
3144     return;
3145   }
3146 
  // The sub-vector type for the current instruction.
3148   VectorType *SubVT = VectorType::get(ScalarTy, VF);
3149 
3150   // Vectorize the interleaved store group.
3151   for (unsigned Part = 0; Part < UF; Part++) {
3152     // Collect the stored vector from each member.
3153     SmallVector<Value *, 4> StoredVecs;
3154     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");
3158 
3159       Value *StoredVec = getOrCreateVectorValue(
3160           cast<StoreInst>(Member)->getValueOperand(), Part);
3161       if (Group->isReverse())
3162         StoredVec = reverseVector(StoredVec);
3163 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
3167         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
3168 
3169       StoredVecs.push_back(StoredVec);
3170     }
3171 
3172     // Concatenate all vectors into a wide vector.
3173     Value *WideVec = concatenateVectors(Builder, StoredVecs);
3174 
3175     // Interleave the elements in the wide vector.
3176     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
3177     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
3178                                               "interleaved.vec");
3179 
3180     Instruction *NewStoreInstr =
3181         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
3182 
3183     Group->addMetadata(NewStoreInstr);
3184   }
3185 }
3186 
3187 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
3188                                                      VectorParts *BlockInMask) {
3189   // Attempt to issue a wide load.
3190   LoadInst *LI = dyn_cast<LoadInst>(Instr);
3191   StoreInst *SI = dyn_cast<StoreInst>(Instr);
3192 
3193   assert((LI || SI) && "Invalid Load/Store instruction");
3194 
3195   LoopVectorizationCostModel::InstWidening Decision =
3196       Cost->getWideningDecision(Instr, VF);
3197   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
3198          "CM decision should be taken at this point");
3199   if (Decision == LoopVectorizationCostModel::CM_Interleave)
3200     return vectorizeInterleaveGroup(Instr);
3201 
3202   Type *ScalarDataTy = getMemInstValueType(Instr);
3203   Type *DataTy = VectorType::get(ScalarDataTy, VF);
3204   Value *Ptr = getPointerOperand(Instr);
3205   unsigned Alignment = getMemInstAlignment(Instr);
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
3208   const DataLayout &DL = Instr->getModule()->getDataLayout();
3209   if (!Alignment)
3210     Alignment = DL.getABITypeAlignment(ScalarDataTy);
3211   unsigned AddressSpace = getMemInstAddressSpace(Instr);
3212 
3213   // Determine if the pointer operand of the access is either consecutive or
3214   // reverse consecutive.
3215   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
3216   bool ConsecutiveStride =
3217       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
3218   bool CreateGatherScatter =
3219       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
3220 
3221   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
3222   // gather/scatter. Otherwise Decision should have been to Scalarize.
3223   assert((ConsecutiveStride || CreateGatherScatter) &&
3224          "The instruction should be scalarized");
3225 
3226   // Handle consecutive loads/stores.
3227   if (ConsecutiveStride)
3228     Ptr = getOrCreateScalarValue(Ptr, {0, 0});
3229 
3230   VectorParts Mask;
3231   bool isMaskRequired = BlockInMask;
3232   if (isMaskRequired)
3233     Mask = *BlockInMask;
3234 
3235   // Handle Stores:
3236   if (SI) {
3237     assert(!Legal->isUniform(SI->getPointerOperand()) &&
3238            "We do not allow storing to uniform addresses");
3239     setDebugLocFromInst(Builder, SI);
3240 
3241     for (unsigned Part = 0; Part < UF; ++Part) {
3242       Instruction *NewSI = nullptr;
3243       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
3244       if (CreateGatherScatter) {
3245         Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
3246         Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
3247         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
3248                                             MaskPart);
3249       } else {
3250         // Calculate the pointer for the specific unroll-part.
3251         Value *PartPtr =
3252             Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3253 
3254         if (Reverse) {
3255           // If we store to reverse consecutive memory locations, then we need
3256           // to reverse the order of elements in the stored value.
3257           StoredVal = reverseVector(StoredVal);
3258           // We don't want to update the value in the map as it might be used in
3259           // another expression. So don't call resetVectorValue(StoredVal).
3260 
3261           // If the address is consecutive but reversed, then the
3262           // wide store needs to start at the last vector element.
3263           PartPtr =
3264               Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3265           PartPtr =
3266               Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3267           if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
3268             Mask[Part] = reverseVector(Mask[Part]);
3269         }
3270 
3271         Value *VecPtr =
3272             Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3273 
3274         if (isMaskRequired)
3275           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
3276                                             Mask[Part]);
3277         else
3278           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
3279       }
3280       addMetadata(NewSI, SI);
3281     }
3282     return;
3283   }
3284 
3285   // Handle loads.
3286   assert(LI && "Must have a load instruction");
3287   setDebugLocFromInst(Builder, LI);
3288   for (unsigned Part = 0; Part < UF; ++Part) {
3289     Value *NewLI;
3290     if (CreateGatherScatter) {
3291       Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
3292       Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
3293       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
3294                                          nullptr, "wide.masked.gather");
3295       addMetadata(NewLI, LI);
3296     } else {
3297       // Calculate the pointer for the specific unroll-part.
3298       Value *PartPtr =
3299           Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3300 
3301       if (Reverse) {
3302         // If the address is consecutive but reversed, then the
3303         // wide load needs to start at the last vector element.
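        // For example, with VF = 4 and Part = 0, the two GEPs below compute
        // Ptr + 0 + (1 - 4) = Ptr - 3, so the wide load reads elements
        // Ptr[-3..0], and reverseVector restores the original order.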
3304         PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3305         PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3306         if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
3307           Mask[Part] = reverseVector(Mask[Part]);
3308       }
3309 
3310       Value *VecPtr =
3311           Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3312       if (isMaskRequired)
3313         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
3314                                          UndefValue::get(DataTy),
3315                                          "wide.masked.load");
3316       else
3317         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
3318 
3319       // Add metadata to the load, but setVectorValue to the reverse shuffle.
3320       addMetadata(NewLI, LI);
3321       if (Reverse)
3322         NewLI = reverseVector(NewLI);
3323     }
3324     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
3325   }
3326 }
3327 
3328 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3329                                                const VPIteration &Instance,
3330                                                bool IfPredicateInstr) {
3331   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3332 
3333   setDebugLocFromInst(Builder, Instr);
3334 
  // Does this instruction return a value?
3336   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3337 
3338   Instruction *Cloned = Instr->clone();
3339   if (!IsVoidRetTy)
3340     Cloned->setName(Instr->getName() + ".cloned");
3341 
3342   // Replace the operands of the cloned instructions with their scalar
3343   // equivalents in the new loop.
3344   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3345     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
3346     Cloned->setOperand(op, NewOp);
3347   }
3348   addNewMetadata(Cloned, Instr);
3349 
3350   // Place the cloned scalar in the new loop.
3351   Builder.Insert(Cloned);
3352 
3353   // Add the cloned scalar to the scalar map entry.
3354   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
3355 
  // If we just cloned a new assumption, add it to the assumption cache.
3357   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3358     if (II->getIntrinsicID() == Intrinsic::assume)
3359       AC->registerAssumption(II);
3360 
3361   // End if-block.
3362   if (IfPredicateInstr)
3363     PredicatedInstructions.push_back(Cloned);
3364 }
3365 
3366 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3367                                                       Value *End, Value *Step,
3368                                                       Instruction *DL) {
3369   BasicBlock *Header = L->getHeader();
3370   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single-block loop.
3373   if (!Latch)
3374     Latch = Header;
3375 
3376   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3377   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3378   setDebugLocFromInst(Builder, OldInst);
3379   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3380 
3381   Builder.SetInsertPoint(Latch->getTerminator());
3382   setDebugLocFromInst(Builder, OldInst);
3383 
3384   // Create i+1 and fill the PHINode.
3385   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3386   Induction->addIncoming(Start, L->getLoopPreheader());
3387   Induction->addIncoming(Next, Latch);
3388   // Create the compare.
3389   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3390   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3391 
3392   // Now we have two terminators. Remove the old one from the block.
3393   Latch->getTerminator()->eraseFromParent();
3394 
3395   return Induction;
3396 }
3397 
3398 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3399   if (TripCount)
3400     return TripCount;
3401 
3402   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3403   // Find the loop boundaries.
3404   ScalarEvolution *SE = PSE.getSE();
3405   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3406   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3407          "Invalid loop count");
3408 
3409   Type *IdxTy = Legal->getWidestInductionType();
3410 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the compare.
  // The only way we can get a backedge-taken count in that case is if the
  // induction variable was signed, and as such will not overflow. In such a
  // case, truncation is legal.
3416   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3417       IdxTy->getPrimitiveSizeInBits())
3418     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3419   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3420 
3421   // Get the total trip count from the count by adding 1.
3422   const SCEV *ExitCount = SE->getAddExpr(
3423       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
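  // For example, a loop whose backedge is taken 9 times has a trip count of
  // 10.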
3424 
3425   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3426 
3427   // Expand the trip count and place the new instructions in the preheader.
3428   // Notice that the pre-header does not change, only the loop body.
3429   SCEVExpander Exp(*SE, DL, "induction");
3430 
3431   // Count holds the overall loop count (N).
3432   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3433                                 L->getLoopPreheader()->getTerminator());
3434 
3435   if (TripCount->getType()->isPointerTy())
3436     TripCount =
3437         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3438                                     L->getLoopPreheader()->getTerminator());
3439 
3440   return TripCount;
3441 }
3442 
3443 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3444   if (VectorTripCount)
3445     return VectorTripCount;
3446 
3447   Value *TC = getOrCreateTripCount(L);
3448   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3449 
3450   // Now we need to generate the expression for the part of the loop that the
3451   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3452   // iterations are not required for correctness, or N - Step, otherwise. Step
3453   // is equal to the vectorization factor (number of SIMD elements) times the
3454   // unroll factor (number of SIMD instructions).
3455   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3456   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
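  // For example, if TC is 10 and VF * UF is 4, R is 2, and the vector loop
  // will execute the first 8 iterations; the remaining 2 run in the scalar
  // remainder loop.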
3457 
3458   // If there is a non-reversed interleaved group that may speculatively access
3459   // memory out-of-bounds, we need to ensure that there will be at least one
3460   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3461   // the trip count, we set the remainder to be equal to the step. If the step
3462   // does not evenly divide the trip count, no adjustment is necessary since
3463   // there will already be scalar iterations. Note that the minimum iterations
3464   // check ensures that N >= Step.
3465   if (VF > 1 && Legal->requiresScalarEpilogue()) {
3466     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3467     R = Builder.CreateSelect(IsZero, Step, R);
3468   }
3469 
3470   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3471 
3472   return VectorTripCount;
3473 }
3474 
3475 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3476                                                    const DataLayout &DL) {
3477   // Verify that V is a vector type with same number of elements as DstVTy.
3478   unsigned VF = DstVTy->getNumElements();
3479   VectorType *SrcVecTy = cast<VectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3481   Type *SrcElemTy = SrcVecTy->getElementType();
3482   Type *DstElemTy = DstVTy->getElementType();
3483   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3484          "Vector elements must have same size");
3485 
3486   // Do a direct cast if element types are castable.
3487   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3488     return Builder.CreateBitOrPointerCast(V, DstVTy);
3489   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
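  // For example, on a target with 64-bit pointers, casting <2 x double> to
  // <2 x i8*> goes through <2 x i64>: a bitcast followed by an inttoptr.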
3494   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3495          "Only one type should be a pointer type");
3496   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3497          "Only one type should be a floating point type");
3498   Type *IntTy =
3499       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3500   VectorType *VecIntTy = VectorType::get(IntTy, VF);
3501   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3502   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3503 }
3504 
3505 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3506                                                          BasicBlock *Bypass) {
3507   Value *Count = getOrCreateTripCount(L);
3508   BasicBlock *BB = L->getLoopPreheader();
3509   IRBuilder<> Builder(BB->getTerminator());
3510 
3511   // Generate code to check if the loop's trip count is less than VF * UF, or
3512   // equal to it in case a scalar epilogue is required; this implies that the
3513   // vector trip count is zero. This check also covers the case where adding one
3514   // to the backedge-taken count overflowed leading to an incorrect trip count
3515   // of zero. In this case we will also jump to the scalar loop.
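  // For example, with VF = 4 and UF = 2, a trip count below 8 (or exactly 8
  // when a scalar epilogue is required) branches to the scalar loop.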
3516   auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3517                                            : ICmpInst::ICMP_ULT;
3518   Value *CheckMinIters = Builder.CreateICmp(
3519       P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3520 
3521   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3522   // Update dominator tree immediately if the generated block is a
3523   // LoopBypassBlock because SCEV expansions to generate loop bypass
3524   // checks may query it before the current function is finished.
3525   DT->addNewBlock(NewBB, BB);
3526   if (L->getParentLoop())
3527     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3528   ReplaceInstWithInst(BB->getTerminator(),
3529                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
3530   LoopBypassBlocks.push_back(BB);
3531 }
3532 
3533 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3534   BasicBlock *BB = L->getLoopPreheader();
3535 
  // Generate the code to check the SCEV assumptions that we made.
3537   // We want the new basic block to start at the first instruction in a
3538   // sequence of instructions that form a check.
3539   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3540                    "scev.check");
3541   Value *SCEVCheck =
3542       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3543 
3544   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3545     if (C->isZero())
3546       return;
3547 
3548   // Create a new block containing the stride check.
3549   BB->setName("vector.scevcheck");
3550   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3551   // Update dominator tree immediately if the generated block is a
3552   // LoopBypassBlock because SCEV expansions to generate loop bypass
3553   // checks may query it before the current function is finished.
3554   DT->addNewBlock(NewBB, BB);
3555   if (L->getParentLoop())
3556     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3557   ReplaceInstWithInst(BB->getTerminator(),
3558                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3559   LoopBypassBlocks.push_back(BB);
3560   AddedSafetyChecks = true;
3561 }
3562 
3563 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3564   BasicBlock *BB = L->getLoopPreheader();
3565 
  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
3569   Instruction *FirstCheckInst;
3570   Instruction *MemRuntimeCheck;
3571   std::tie(FirstCheckInst, MemRuntimeCheck) =
3572       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3573   if (!MemRuntimeCheck)
3574     return;
3575 
3576   // Create a new block containing the memory check.
3577   BB->setName("vector.memcheck");
3578   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3579   // Update dominator tree immediately if the generated block is a
3580   // LoopBypassBlock because SCEV expansions to generate loop bypass
3581   // checks may query it before the current function is finished.
3582   DT->addNewBlock(NewBB, BB);
3583   if (L->getParentLoop())
3584     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3585   ReplaceInstWithInst(BB->getTerminator(),
3586                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3587   LoopBypassBlocks.push_back(BB);
3588   AddedSafetyChecks = true;
3589 
3590   // We currently don't use LoopVersioning for the actual loop cloning but we
3591   // still use it to add the noalias metadata.
3592   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3593                                            PSE.getSE());
3594   LVer->prepareNoAliasMetadata();
3595 }
3596 
3597 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3598   /*
3599    In this function we generate a new loop. The new loop will contain
3600    the vectorized instructions while the old loop will continue to run the
3601    scalar remainder.
3602 
3603        [ ] <-- loop iteration number check.
3604     /   |
3605    /    v
3606   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3607   |  /  |
3608   | /   v
3609   ||   [ ]     <-- vector pre header.
3610   |/    |
3611   |     v
3612   |    [  ] \
3613   |    [  ]_|   <-- vector loop.
3614   |     |
3615   |     v
3616   |   -[ ]   <--- middle-block.
3617   |  /  |
3618   | /   v
3619   -|- >[ ]     <--- new preheader.
3620    |    |
3621    |    v
3622    |   [ ] \
3623    |   [ ]_|   <-- old scalar loop to handle remainder.
3624     \   |
3625      \  v
3626       >[ ]     <-- exit block.
3627    ...
3628    */
3629 
3630   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3631   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3632   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3633   assert(VectorPH && "Invalid loop structure");
3634   assert(ExitBlock && "Must have an exit block");
3635 
3636   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
3638   // induction variables. In the code below we also support a case where we
3639   // don't have a single induction variable.
3640   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
3643   //   - is an integer
3644   //   - counts from zero, stepping by one
3645   //   - is the size of the widest induction variable type
3646   // then we create a new one.
3647   OldInduction = Legal->getPrimaryInduction();
3648   Type *IdxTy = Legal->getWidestInductionType();
3649 
  // Split the single-block loop into the two-loop structure described above.
3651   BasicBlock *VecBody =
3652       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3653   BasicBlock *MiddleBlock =
3654       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3655   BasicBlock *ScalarPH =
3656       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3657 
3658   // Create and register the new vector loop.
3659   Loop *Lp = LI->AllocateLoop();
3660   Loop *ParentLoop = OrigLoop->getParentLoop();
3661 
3662   // Insert the new loop into the loop nest and register the new basic blocks
3663   // before calling any utilities such as SCEV that require valid LoopInfo.
3664   if (ParentLoop) {
3665     ParentLoop->addChildLoop(Lp);
3666     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3667     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3668   } else {
3669     LI->addTopLevelLoop(Lp);
3670   }
3671   Lp->addBasicBlockToLoop(VecBody, *LI);
3672 
3673   // Find the loop boundaries.
3674   Value *Count = getOrCreateTripCount(Lp);
3675 
3676   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3677 
3678   // Now, compare the new count to zero. If it is zero skip the vector loop and
3679   // jump to the scalar loop. This check also covers the case where the
3680   // backedge-taken count is uint##_max: adding one to it will overflow leading
3681   // to an incorrect trip count of zero. In this (rare) case we will also jump
3682   // to the scalar loop.
3683   emitMinimumIterationCountCheck(Lp, ScalarPH);
3684 
3685   // Generate the code to check any assumptions that we've made for SCEV
3686   // expressions.
3687   emitSCEVChecks(Lp, ScalarPH);
3688 
  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
3692   emitMemRuntimeChecks(Lp, ScalarPH);
3693 
3694   // Generate the induction variable.
3695   // The loop step is equal to the vectorization factor (num of SIMD elements)
3696   // times the unroll factor (num of SIMD instructions).
3697   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3698   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3699   Induction =
3700       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3701                               getDebugLocFromInstOrOperands(OldInduction));
3702 
3703   // We are going to resume the execution of the scalar loop.
3704   // Go over all of the induction variables that we found and fix the
3705   // PHIs that are left in the scalar version of the loop.
3706   // The starting values of PHI nodes depend on the counter of the last
3707   // iteration in the vectorized loop.
3708   // If we come from a bypass edge then we need to start from the original
3709   // start value.
3710 
3711   // This variable saves the new starting index for the scalar loop. It is used
3712   // to test if there are any tail iterations left once the vector loop has
3713   // completed.
3714   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3715   for (auto &InductionEntry : *List) {
3716     PHINode *OrigPhi = InductionEntry.first;
3717     InductionDescriptor II = InductionEntry.second;
3718 
    // Create phi nodes to merge from the backedge-taken check block.
3720     PHINode *BCResumeVal = PHINode::Create(
3721         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3722     Value *&EndValue = IVEndValues[OrigPhi];
3723     if (OrigPhi == OldInduction) {
3724       // We know what the end value is.
3725       EndValue = CountRoundDown;
3726     } else {
3727       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3728       Type *StepType = II.getStep()->getType();
3729       Instruction::CastOps CastOp =
3730         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3731       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3732       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3733       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3734       EndValue->setName("ind.end");
3735     }
3736 
3737     // The new PHI merges the original incoming value, in case of a bypass,
3738     // or the value at the end of the vectorized loop.
3739     BCResumeVal->addIncoming(EndValue, MiddleBlock);
3740 
3741     // Fix the scalar body counter (PHI node).
3742     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3743 
3744     // The old induction's phi node in the scalar body needs the truncated
3745     // value.
3746     for (BasicBlock *BB : LoopBypassBlocks)
3747       BCResumeVal->addIncoming(II.getStartValue(), BB);
3748     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3749   }
3750 
3751   // Add a check in the middle block to see if we have completed
3752   // all of the iterations in the first vector loop.
3753   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3754   Value *CmpN =
3755       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3756                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3757   ReplaceInstWithInst(MiddleBlock->getTerminator(),
3758                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3759 
3760   // Get ready to start creating new instructions into the vectorized body.
3761   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3762 
3763   // Save the state.
3764   LoopVectorPreHeader = Lp->getLoopPreheader();
3765   LoopScalarPreHeader = ScalarPH;
3766   LoopMiddleBlock = MiddleBlock;
3767   LoopExitBlock = ExitBlock;
3768   LoopVectorBody = VecBody;
3769   LoopScalarBody = OldBasicBlock;
3770 
3771   // Keep all loop hints from the original loop on the vector loop (we'll
3772   // replace the vectorizer-specific hints below).
3773   if (MDNode *LID = OrigLoop->getLoopID())
3774     Lp->setLoopID(LID);
3775 
3776   LoopVectorizeHints Hints(Lp, true, *ORE);
3777   Hints.setAlreadyVectorized();
3778 
3779   return LoopVectorPreHeader;
3780 }
3781 
3782 // Fix up external users of the induction variable. At this point, we are
3783 // in LCSSA form, with all external PHIs that use the IV having one input value,
3784 // coming from the remainder loop. We need those PHIs to also have a correct
3785 // value for the IV when arriving directly from the middle block.
3786 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3787                                        const InductionDescriptor &II,
3788                                        Value *CountRoundDown, Value *EndValue,
3789                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the value that feeds into the phi from
  // the loop latch) and those that use the penultimate value (the phi
  // itself). We allow both, but they obviously have different values.
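  //
  // For example (hypothetical shorthand):
  //
  //   loop:
  //     %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
  //     %iv.next = add i64 %iv, 1
  //     ...
  //   exit:
  //     %last = phi i64 [ %iv.next, %loop ]    ; uses the last value
  //     %penult = phi i64 [ %iv, %loop ]       ; uses the penultimate value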
3794 
3795   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3796 
3797   DenseMap<Value *, Value *> MissingVals;
3798 
3799   // An external user of the last iteration's value should see the value that
3800   // the remainder loop uses to initialize its own IV.
3801   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3802   for (User *U : PostInc->users()) {
3803     Instruction *UI = cast<Instruction>(U);
3804     if (!OrigLoop->contains(UI)) {
3805       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3806       MissingVals[UI] = EndValue;
3807     }
3808   }
3809 
  // An external user of the penultimate value needs to see EndValue - Step.
3811   // The simplest way to get this is to recompute it from the constituent SCEVs,
3812   // that is Start + (Step * (CRD - 1)).
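  // For instance (hypothetical numbers): with Start = 0 and Step = 2, the
  // escape value below computes 0 + 2 * (CRD - 1), one step short of
  // EndValue.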
3813   for (User *U : OrigPhi->users()) {
3814     auto *UI = cast<Instruction>(U);
3815     if (!OrigLoop->contains(UI)) {
3816       const DataLayout &DL =
3817           OrigLoop->getHeader()->getModule()->getDataLayout();
3818       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3819 
3820       IRBuilder<> B(MiddleBlock->getTerminator());
3821       Value *CountMinusOne = B.CreateSub(
3822           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3823       Value *CMO =
3824           !II.getStep()->getType()->isIntegerTy()
3825               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3826                              II.getStep()->getType())
3827               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3828       CMO->setName("cast.cmo");
3829       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3830       Escape->setName("ind.escape");
3831       MissingVals[UI] = Escape;
3832     }
3833   }
3834 
3835   for (auto &I : MissingVals) {
3836     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3838     // that is %IV2 = phi [...], [ %IV1, %latch ]
3839     // In this case, if IV1 has an external use, we need to avoid adding both
3840     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3841     // don't already have an incoming value for the middle block.
3842     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3843       PHI->addIncoming(I.second, MiddleBlock);
3844   }
3845 }
3846 
3847 namespace {
3848 
3849 struct CSEDenseMapInfo {
3850   static bool canHandle(const Instruction *I) {
3851     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3852            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3853   }
3854 
3855   static inline Instruction *getEmptyKey() {
3856     return DenseMapInfo<Instruction *>::getEmptyKey();
3857   }
3858 
3859   static inline Instruction *getTombstoneKey() {
3860     return DenseMapInfo<Instruction *>::getTombstoneKey();
3861   }
3862 
3863   static unsigned getHashValue(const Instruction *I) {
3864     assert(canHandle(I) && "Unknown instruction!");
3865     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3866                                                            I->value_op_end()));
3867   }
3868 
3869   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3870     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3871         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3872       return LHS == RHS;
3873     return LHS->isIdenticalTo(RHS);
3874   }
3875 };
3876 
3877 } // end anonymous namespace
3878 
/// \brief Perform common subexpression elimination (CSE) on induction variable
/// instructions.
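/// For example (hypothetical shorthand), two identical extracts created by
/// unrolling collapse into one:
///
///   %e0 = extractelement <4 x i32> %v, i32 0
///   %e1 = extractelement <4 x i32> %v, i32 0   ; replaced by %e0 and erased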
3880 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3882   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3883   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3884     Instruction *In = &*I++;
3885 
3886     if (!CSEDenseMapInfo::canHandle(In))
3887       continue;
3888 
3889     // Check if we can replace this instruction with any of the
3890     // visited instructions.
3891     if (Instruction *V = CSEMap.lookup(In)) {
3892       In->replaceAllUsesWith(V);
3893       In->eraseFromParent();
3894       continue;
3895     }
3896 
3897     CSEMap[In] = In;
3898   }
3899 }
3900 
3901 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3902 /// convenience wrapper for the type-based getScalarizationOverhead API.
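/// For example (hypothetical): scalarizing a non-load instruction with a
/// vector result at VF = 4 is charged 4 inserts to rebuild the result vector
/// plus 4 extracts per vector operand, as priced by the target below.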
3903 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3904                                          const TargetTransformInfo &TTI) {
3905   if (VF == 1)
3906     return 0;
3907 
3908   unsigned Cost = 0;
3909   Type *RetTy = ToVectorTy(I->getType(), VF);
3910   if (!RetTy->isVoidTy() &&
3911       (!isa<LoadInst>(I) ||
3912        !TTI.supportsEfficientVectorElementLoadStore()))
3913     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3914 
3915   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3916     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3917     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3918   }
3919   else if (!isa<StoreInst>(I) ||
3920            !TTI.supportsEfficientVectorElementLoadStore()) {
3921     SmallVector<const Value *, 4> Operands(I->operand_values());
3922     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3923   }
3924 
3925   return Cost;
3926 }
3927 
3928 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3929 // Return the cost of the instruction, including scalarization overhead if it's
3930 // needed. The flag NeedToScalarize shows if the call needs to be scalarized -
// i.e., either a vector version isn't available, or it is too expensive.
3932 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3933                                   const TargetTransformInfo &TTI,
3934                                   const TargetLibraryInfo *TLI,
3935                                   bool &NeedToScalarize) {
3936   Function *F = CI->getCalledFunction();
3937   StringRef FnName = CI->getCalledFunction()->getName();
3938   Type *ScalarRetTy = CI->getType();
3939   SmallVector<Type *, 4> Tys, ScalarTys;
3940   for (auto &ArgOp : CI->arg_operands())
3941     ScalarTys.push_back(ArgOp->getType());
3942 
3943   // Estimate cost of scalarized vector call. The source operands are assumed
3944   // to be vectors, so we need to extract individual elements from there,
3945   // execute VF scalar calls, and then gather the result into the vector return
3946   // value.
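  // For example (hypothetical numbers): at VF = 4, a scalar call of cost 10
  // is estimated as 4 * 10 plus the extract/insert overhead computed below.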
3947   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3948   if (VF == 1)
3949     return ScalarCallCost;
3950 
3951   // Compute corresponding vector type for return value and arguments.
3952   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3953   for (Type *ScalarTy : ScalarTys)
3954     Tys.push_back(ToVectorTy(ScalarTy, VF));
3955 
3956   // Compute costs of unpacking argument values for the scalar calls and
3957   // packing the return values to a vector.
3958   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3959 
3960   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3961 
3962   // If we can't emit a vector call for this function, then the currently found
3963   // cost is the cost we need to return.
3964   NeedToScalarize = true;
3965   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3966     return Cost;
3967 
3968   // If the corresponding vector cost is cheaper, return its cost.
3969   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3970   if (VectorCallCost < Cost) {
3971     NeedToScalarize = false;
3972     return VectorCallCost;
3973   }
3974   return Cost;
3975 }
3976 
3977 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3978 // factor VF.  Return the cost of the instruction, including scalarization
3979 // overhead if it's needed.
3980 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3981                                        const TargetTransformInfo &TTI,
3982                                        const TargetLibraryInfo *TLI) {
3983   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3984   assert(ID && "Expected intrinsic call!");
3985 
3986   FastMathFlags FMF;
3987   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3988     FMF = FPMO->getFastMathFlags();
3989 
3990   SmallVector<Value *, 4> Operands(CI->arg_operands());
3991   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3992 }
3993 
3994 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3995   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3996   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3997   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3998 }
3999 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
4000   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
4001   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
4002   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
4003 }
4004 
4005 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
4006   // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
4008   // later and will remove any ext/trunc pairs.
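  // Shorthand illustration (hypothetical, assuming MinBWs says this add only
  // needs 8 bits and VF = 4):
  //
  //   %s = add <4 x i32> %a, %b
  //
  // becomes
  //
  //   %ta = trunc <4 x i32> %a to <4 x i8>
  //   %tb = trunc <4 x i32> %b to <4 x i8>
  //   %ts = add <4 x i8> %ta, %tb
  //   %s  = zext <4 x i8> %ts to <4 x i32>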
4009   SmallPtrSet<Value *, 4> Erased;
4010   for (const auto &KV : Cost->getMinimalBitwidths()) {
4011     // If the value wasn't vectorized, we must maintain the original scalar
4012     // type. The absence of the value from VectorLoopValueMap indicates that it
4013     // wasn't vectorized.
4014     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
4015       continue;
4016     for (unsigned Part = 0; Part < UF; ++Part) {
4017       Value *I = getOrCreateVectorValue(KV.first, Part);
4018       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
4019         continue;
4020       Type *OriginalTy = I->getType();
4021       Type *ScalarTruncatedTy =
4022           IntegerType::get(OriginalTy->getContext(), KV.second);
4023       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
4024                                           OriginalTy->getVectorNumElements());
4025       if (TruncatedTy == OriginalTy)
4026         continue;
4027 
4028       IRBuilder<> B(cast<Instruction>(I));
4029       auto ShrinkOperand = [&](Value *V) -> Value * {
4030         if (auto *ZI = dyn_cast<ZExtInst>(V))
4031           if (ZI->getSrcTy() == TruncatedTy)
4032             return ZI->getOperand(0);
4033         return B.CreateZExtOrTrunc(V, TruncatedTy);
4034       };
4035 
4036       // The actual instruction modification depends on the instruction type,
4037       // unfortunately.
4038       Value *NewI = nullptr;
4039       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4040         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
4041                              ShrinkOperand(BO->getOperand(1)));
4042 
4043         // Any wrapping introduced by shrinking this operation shouldn't be
4044         // considered undefined behavior. So, we can't unconditionally copy
4045         // arithmetic wrapping flags to NewI.
4046         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
4047       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
4048         NewI =
4049             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
4050                          ShrinkOperand(CI->getOperand(1)));
4051       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
4052         NewI = B.CreateSelect(SI->getCondition(),
4053                               ShrinkOperand(SI->getTrueValue()),
4054                               ShrinkOperand(SI->getFalseValue()));
4055       } else if (auto *CI = dyn_cast<CastInst>(I)) {
4056         switch (CI->getOpcode()) {
4057         default:
4058           llvm_unreachable("Unhandled cast!");
4059         case Instruction::Trunc:
4060           NewI = ShrinkOperand(CI->getOperand(0));
4061           break;
4062         case Instruction::SExt:
4063           NewI = B.CreateSExtOrTrunc(
4064               CI->getOperand(0),
4065               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4066           break;
4067         case Instruction::ZExt:
4068           NewI = B.CreateZExtOrTrunc(
4069               CI->getOperand(0),
4070               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4071           break;
4072         }
4073       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4074         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
4075         auto *O0 = B.CreateZExtOrTrunc(
4076             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
4077         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
4078         auto *O1 = B.CreateZExtOrTrunc(
4079             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
4080 
4081         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
4082       } else if (isa<LoadInst>(I)) {
4083         // Don't do anything with the operands, just extend the result.
4084         continue;
4085       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4086         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
4087         auto *O0 = B.CreateZExtOrTrunc(
4088             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4089         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4090         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4091       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4092         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
4093         auto *O0 = B.CreateZExtOrTrunc(
4094             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4095         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4096       } else {
4097         llvm_unreachable("Unhandled instruction type!");
4098       }
4099 
4100       // Lastly, extend the result.
4101       NewI->takeName(cast<Instruction>(I));
4102       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4103       I->replaceAllUsesWith(Res);
4104       cast<Instruction>(I)->eraseFromParent();
4105       Erased.insert(I);
4106       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
4107     }
4108   }
4109 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
4111   for (const auto &KV : Cost->getMinimalBitwidths()) {
4112     // If the value wasn't vectorized, we must maintain the original scalar
4113     // type. The absence of the value from VectorLoopValueMap indicates that it
4114     // wasn't vectorized.
4115     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
4116       continue;
4117     for (unsigned Part = 0; Part < UF; ++Part) {
4118       Value *I = getOrCreateVectorValue(KV.first, Part);
4119       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4120       if (Inst && Inst->use_empty()) {
4121         Value *NewI = Inst->getOperand(0);
4122         Inst->eraseFromParent();
4123         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
4124       }
4125     }
4126   }
4127 }
4128 
4129 void InnerLoopVectorizer::fixVectorizedLoop() {
4130   // Insert truncates and extends for any truncated instructions as hints to
4131   // InstCombine.
4132   if (VF > 1)
4133     truncateToMinimalBitwidths();
4134 
4135   // At this point every instruction in the original loop is widened to a
4136   // vector form. Now we need to fix the recurrences in the loop. These PHI
4137   // nodes are currently empty because we did not want to introduce cycles.
4138   // This is the second stage of vectorizing recurrences.
4139   fixCrossIterationPHIs();
4140 
4141   // Update the dominator tree.
4142   //
4143   // FIXME: After creating the structure of the new loop, the dominator tree is
4144   //        no longer up-to-date, and it remains that way until we update it
4145   //        here. An out-of-date dominator tree is problematic for SCEV,
4146   //        because SCEVExpander uses it to guide code generation. The
  //        vectorizer uses SCEVExpander in several places. Instead, we should
4148   //        keep the dominator tree up-to-date as we go.
4149   updateAnalysis();
4150 
4151   // Fix-up external users of the induction variables.
4152   for (auto &Entry : *Legal->getInductionVars())
4153     fixupIVUsers(Entry.first, Entry.second,
4154                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4155                  IVEndValues[Entry.first], LoopMiddleBlock);
4156 
4157   fixLCSSAPHIs();
4158   for (Instruction *PI : PredicatedInstructions)
4159     sinkScalarOperands(&*PI);
4160 
4161   // Remove redundant induction instructions.
4162   cse(LoopVectorBody);
4163 }
4164 
4165 void InnerLoopVectorizer::fixCrossIterationPHIs() {
4166   // In order to support recurrences we need to be able to vectorize Phi nodes.
4167   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4168   // stage #2: We now need to fix the recurrences by adding incoming edges to
4169   // the currently empty PHI nodes. At this point every instruction in the
4170   // original loop is widened to a vector form so we can use them to construct
4171   // the incoming edges.
4172   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4173     // Handle first-order recurrences and reductions that need to be fixed.
4174     if (Legal->isFirstOrderRecurrence(&Phi))
4175       fixFirstOrderRecurrence(&Phi);
4176     else if (Legal->isReductionVariable(&Phi))
4177       fixReduction(&Phi);
4178   }
4179 }
4180 
4181 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4182   // This is the second phase of vectorizing first-order recurrences. An
4183   // overview of the transformation is described below. Suppose we have the
4184   // following loop.
4185   //
4186   //   for (int i = 0; i < n; ++i)
4187   //     b[i] = a[i] - a[i - 1];
4188   //
4189   // There is a first-order recurrence on "a". For this loop, the shorthand
4190   // scalar IR looks like:
4191   //
4192   //   scalar.ph:
4193   //     s_init = a[-1]
4194   //     br scalar.body
4195   //
4196   //   scalar.body:
4197   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4198   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4199   //     s2 = a[i]
4200   //     b[i] = s2 - s1
4201   //     br cond, scalar.body, ...
4202   //
  // In this example, s1 is a recurrence because its value depends on the
4204   // previous iteration. In the first phase of vectorization, we created a
4205   // temporary value for s1. We now complete the vectorization and produce the
4206   // shorthand vector IR shown below (for VF = 4, UF = 1).
4207   //
4208   //   vector.ph:
4209   //     v_init = vector(..., ..., ..., a[-1])
4210   //     br vector.body
4211   //
4212   //   vector.body
4213   //     i = phi [0, vector.ph], [i+4, vector.body]
4214   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4215   //     v2 = a[i, i+1, i+2, i+3];
4216   //     v3 = vector(v1(3), v2(0, 1, 2))
4217   //     b[i, i+1, i+2, i+3] = v2 - v3
4218   //     br cond, vector.body, middle.block
4219   //
4220   //   middle.block:
4221   //     x = v2(3)
4222   //     br scalar.ph
4223   //
4224   //   scalar.ph:
4225   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4226   //     br scalar.body
4227   //
  // After the vector loop completes execution, we extract the next value of
4229   // the recurrence (x) to use as the initial value in the scalar loop.
4230 
4231   // Get the original loop preheader and single loop latch.
4232   auto *Preheader = OrigLoop->getLoopPreheader();
4233   auto *Latch = OrigLoop->getLoopLatch();
4234 
4235   // Get the initial and previous values of the scalar recurrence.
4236   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4237   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4238 
4239   // Create a vector from the initial value.
4240   auto *VectorInit = ScalarInit;
4241   if (VF > 1) {
4242     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4243     VectorInit = Builder.CreateInsertElement(
4244         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4245         Builder.getInt32(VF - 1), "vector.recur.init");
4246   }
4247 
4248   // We constructed a temporary phi node in the first phase of vectorization.
4249   // This phi node will eventually be deleted.
4250   Builder.SetInsertPoint(
4251       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4252 
4253   // Create a phi node for the new recurrence. The current value will either be
4254   // the initial value inserted into a vector or loop-varying vector value.
4255   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4256   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4257 
4258   // Get the vectorized previous value of the last part UF - 1. It appears last
4259   // among all unrolled iterations, due to the order of their construction.
4260   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
4261 
4262   // Set the insertion point after the previous value if it is an instruction.
4263   // Note that the previous value may have been constant-folded so it is not
4264   // guaranteed to be an instruction in the vector loop. Also, if the previous
4265   // value is a phi node, we should insert after all the phi nodes to avoid
4266   // breaking basic block verification.
4267   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
4268       isa<PHINode>(PreviousLastPart))
4269     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4270   else
4271     Builder.SetInsertPoint(
4272         &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
4273 
4274   // We will construct a vector for the recurrence by combining the values for
4275   // the current and previous iterations. This is the required shuffle mask.
4276   SmallVector<Constant *, 8> ShuffleMask(VF);
4277   ShuffleMask[0] = Builder.getInt32(VF - 1);
4278   for (unsigned I = 1; I < VF; ++I)
4279     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4280 
4281   // The vector from which to take the initial value for the current iteration
4282   // (actual or unrolled). Initially, this is the vector phi node.
4283   Value *Incoming = VecPhi;
4284 
4285   // Shuffle the current and previous vector and update the vector parts.
4286   for (unsigned Part = 0; Part < UF; ++Part) {
4287     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
4288     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
4289     auto *Shuffle =
4290         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
4291                                              ConstantVector::get(ShuffleMask))
4292                : Incoming;
4293     PhiPart->replaceAllUsesWith(Shuffle);
4294     cast<Instruction>(PhiPart)->eraseFromParent();
4295     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
4296     Incoming = PreviousPart;
4297   }
4298 
4299   // Fix the latch value of the new recurrence in the vector loop.
4300   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4301 
4302   // Extract the last vector element in the middle block. This will be the
4303   // initial value for the recurrence when jumping to the scalar loop.
4304   auto *ExtractForScalar = Incoming;
4305   if (VF > 1) {
4306     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4307     ExtractForScalar = Builder.CreateExtractElement(
4308         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
4309   }
  // Extract the second-to-last element in the middle block if the Phi is used
  // outside the loop. We need to extract the value of the phi itself, not the
  // last element (the phi's update in the current iteration). This will be
  // the value when jumping to the exit block from the LoopMiddleBlock, i.e.,
  // when the scalar loop is not run at all.
4315   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4316   if (VF > 1)
4317     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4318         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without being vectorized, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
4323   else if (UF > 1)
4324     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4325 
4326   // Fix the initial value of the original recurrence in the scalar loop.
4327   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4328   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4329   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4330     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4331     Start->addIncoming(Incoming, BB);
4332   }
4333 
4334   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4335   Phi->setName("scalar.recur");
4336 
4337   // Finally, fix users of the recurrence outside the loop. The users will need
4338   // either the last value of the scalar recurrence or the last value of the
4339   // vector recurrence we extracted in the middle block. Since the loop is in
4340   // LCSSA form, we just need to find the phi node for the original scalar
4341   // recurrence in the exit block, and then add an edge for the middle block.
4342   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4343     if (LCSSAPhi.getIncomingValue(0) == Phi) {
4344       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4345       break;
4346     }
4347   }
4348 }
4349 
4350 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4351   Constant *Zero = Builder.getInt32(0);
4352 
  // Get its reduction variable descriptor.
4354   assert(Legal->isReductionVariable(Phi) &&
4355          "Unable to find the reduction variable");
4356   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4357 
4358   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4359   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4360   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4361   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4362     RdxDesc.getMinMaxRecurrenceKind();
4363   setDebugLocFromInst(Builder, ReductionStartValue);
4364 
4365   // We need to generate a reduction vector from the incoming scalar.
4366   // To do so, we need to generate the 'identity' vector and override
4367   // one of the elements with the incoming scalar reduction. We need
4368   // to do it in the vector-loop preheader.
4369   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4370 
4371   // This is the vector-clone of the value that leaves the loop.
4372   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4373 
  // Find the reduction identity variable. Zero for addition, or, and xor;
  // one for multiplication; -1 for and.
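  // For example (hypothetical): an integer add reduction starting at 5 with
  // VF = 4 uses Identity = <0, 0, 0, 0> and VectorStart = <5, 0, 0, 0>, so
  // the start value is counted exactly once across all lanes.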
4376   Value *Identity;
4377   Value *VectorStart;
4378   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4379       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
4381     if (VF == 1) {
4382       VectorStart = Identity = ReductionStartValue;
4383     } else {
4384       VectorStart = Identity =
4385         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4386     }
4387   } else {
4388     // Handle other reduction kinds:
4389     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4390         RK, VecTy->getScalarType());
4391     if (VF == 1) {
4392       Identity = Iden;
4393       // This vector is the Identity vector where the first element is the
4394       // incoming scalar reduction.
4395       VectorStart = ReductionStartValue;
4396     } else {
4397       Identity = ConstantVector::getSplat(VF, Iden);
4398 
4399       // This vector is the Identity vector where the first element is the
4400       // incoming scalar reduction.
4401       VectorStart =
4402         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4403     }
4404   }
4405 
4406   // Fix the vector-loop phi.
4407 
4408   // Reductions do not have to start at zero. They can start with
4409   // any loop invariant values.
4410   BasicBlock *Latch = OrigLoop->getLoopLatch();
4411   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4412   for (unsigned Part = 0; Part < UF; ++Part) {
4413     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4414     Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
4417     Value *StartVal = (Part == 0) ? VectorStart : Identity;
4418     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
4419     cast<PHINode>(VecRdxPhi)
4420       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4421   }
4422 
4423   // Before each round, move the insertion point right between
4424   // the PHIs and the values we are going to write.
4425   // This allows us to write both PHINodes and the extractelement
4426   // instructions.
4427   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4428 
4429   setDebugLocFromInst(Builder, LoopExitInst);
4430 
4431   // If the vector reduction can be performed in a smaller type, we truncate
4432   // then extend the loop exit value to enable InstCombine to evaluate the
4433   // entire expression in the smaller type.
4434   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
4435     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4436     Builder.SetInsertPoint(
4437         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4438     VectorParts RdxParts(UF);
4439     for (unsigned Part = 0; Part < UF; ++Part) {
4440       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4441       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4442       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4443                                         : Builder.CreateZExt(Trunc, VecTy);
4444       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4445            UI != RdxParts[Part]->user_end();)
4446         if (*UI != Trunc) {
4447           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4448           RdxParts[Part] = Extnd;
4449         } else {
4450           ++UI;
4451         }
4452     }
4453     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4454     for (unsigned Part = 0; Part < UF; ++Part) {
4455       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4456       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
4457     }
4458   }
4459 
4460   // Reduce all of the unrolled parts into a single vector.
4461   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
4462   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4463   setDebugLocFromInst(Builder, ReducedPartRdx);
4464   for (unsigned Part = 1; Part < UF; ++Part) {
4465     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4466     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4467       // Floating point operations had to be 'fast' to enable the reduction.
4468       ReducedPartRdx = addFastMathFlag(
4469           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
4470                               ReducedPartRdx, "bin.rdx"));
4471     else
4472       ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
4473           Builder, MinMaxKind, ReducedPartRdx, RdxPart);
4474   }
4475 
4476   if (VF > 1) {
4477     bool NoNaN = Legal->hasFunNoNaNAttr();
4478     ReducedPartRdx =
4479         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
4480     // If the reduction can be performed in a smaller type, we need to extend
4481     // the reduction to the wider type before we branch to the original loop.
4482     if (Phi->getType() != RdxDesc.getRecurrenceType())
4483       ReducedPartRdx =
4484         RdxDesc.isSigned()
4485         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4486         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4487   }
4488 
4489   // Create a phi node that merges control-flow from the backedge-taken check
4490   // block and the middle block.
4491   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4492                                         LoopScalarPreHeader->getTerminator());
4493   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4494     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4495   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4496 
4497   // Now, we need to fix the users of the reduction variable
4498   // inside and outside of the scalar remainder loop.
4499   // We know that the loop is in LCSSA form. We need to update the
4500   // PHI nodes in the exit blocks.
4501   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4502     // All PHINodes need to have a single entry edge, or two if
4503     // we already fixed them.
4504     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4505 
4506     // We found a reduction value exit-PHI. Update it with the
4507     // incoming bypass edge.
4508     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
4509       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4510   } // end of the LCSSA phi scan.
4511 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
4514   int IncomingEdgeBlockIdx =
4515     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4516   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4517   // Pick the other block.
4518   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4519   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4520   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4521 }
4522 
4523 void InnerLoopVectorizer::fixLCSSAPHIs() {
4524   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4525     if (LCSSAPhi.getNumIncomingValues() == 1) {
4526       assert(OrigLoop->isLoopInvariant(LCSSAPhi.getIncomingValue(0)) &&
4527              "Incoming value isn't loop invariant");
4528       LCSSAPhi.addIncoming(LCSSAPhi.getIncomingValue(0), LoopMiddleBlock);
4529     }
4530   }
4531 }
4532 
4533 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4534   // The basic block and loop containing the predicated instruction.
4535   auto *PredBB = PredInst->getParent();
4536   auto *VectorLoop = LI->getLoopFor(PredBB);
4537 
4538   // Initialize a worklist with the operands of the predicated instruction.
4539   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4540 
4541   // Holds instructions that we need to analyze again. An instruction may be
4542   // reanalyzed if we don't yet know if we can sink it or not.
4543   SmallVector<Instruction *, 8> InstsToReanalyze;
4544 
4545   // Returns true if a given use occurs in the predicated block. Phi nodes use
4546   // their operands in their corresponding predecessor blocks.
4547   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4548     auto *I = cast<Instruction>(U.getUser());
4549     BasicBlock *BB = I->getParent();
4550     if (auto *Phi = dyn_cast<PHINode>(I))
4551       BB = Phi->getIncomingBlock(
4552           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4553     return BB == PredBB;
4554   };
4555 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one
  // pass through the worklist doesn't sink a single instruction.
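  //
  // Shorthand illustration (hypothetical): if the predicated block holds
  //
  //   %d = sdiv i32 %x, %y
  //
  // and %x is defined by "%x = add i32 %a, %b" whose only use is %d, the add
  // is sunk into the predicated block, and %a and %b are then revisited on
  // the next pass.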
4560   bool Changed;
4561   do {
4562     // Add the instructions that need to be reanalyzed to the worklist, and
4563     // reset the changed indicator.
4564     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4565     InstsToReanalyze.clear();
4566     Changed = false;
4567 
4568     while (!Worklist.empty()) {
4569       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4570 
4571       // We can't sink an instruction if it is a phi node, is already in the
4572       // predicated block, is not in the loop, or may have side effects.
4573       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4574           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4575         continue;
4576 
4577       // It's legal to sink the instruction if all its uses occur in the
4578       // predicated block. Otherwise, there's nothing to do yet, and we may
4579       // need to reanalyze the instruction.
4580       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4581         InstsToReanalyze.push_back(I);
4582         continue;
4583       }
4584 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4587       I->moveBefore(&*PredBB->getFirstInsertionPt());
4588       Worklist.insert(I->op_begin(), I->op_end());
4589 
4590       // The sinking may have enabled other instructions to be sunk, so we will
4591       // need to iterate.
4592       Changed = true;
4593     }
4594   } while (Changed);
4595 }
4596 
4597 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4598                                               unsigned VF) {
4599   assert(PN->getParent() == OrigLoop->getHeader() &&
4600          "Non-header phis should have been handled elsewhere");
4601 
4602   PHINode *P = cast<PHINode>(PN);
4603   // In order to support recurrences we need to be able to vectorize Phi nodes.
4604   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4605   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4606   // this value when we vectorize all of the instructions that use the PHI.
4607   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4608     for (unsigned Part = 0; Part < UF; ++Part) {
4609       // This is phase one of vectorizing PHIs.
4610       Type *VecTy =
4611           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4612       Value *EntryPart = PHINode::Create(
4613           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4614       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4615     }
4616     return;
4617   }
4618 
4619   setDebugLocFromInst(Builder, P);
4620 
4621   // This PHINode must be an induction variable.
4622   // Make sure that we know about it.
4623   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4624 
4625   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4626   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4627 
4628   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4629   // which can be found from the original scalar operations.
4630   switch (II.getKind()) {
4631   case InductionDescriptor::IK_NoInduction:
4632     llvm_unreachable("Unknown induction");
4633   case InductionDescriptor::IK_IntInduction:
4634   case InductionDescriptor::IK_FpInduction:
4635     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4636   case InductionDescriptor::IK_PtrInduction: {
4637     // Handle the pointer induction variable case.
4638     assert(P->getType()->isPointerTy() && "Unexpected type.");
4639     // This is the normalized GEP that starts counting at zero.
4640     Value *PtrInd = Induction;
4641     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4642     // Determine the number of scalars we need to generate for each unroll
4643     // iteration. If the instruction is uniform, we only need to generate the
4644     // first lane. Otherwise, we generate all VF values.
4645     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4646     // These are the scalar results. Notice that we don't generate vector GEPs
4647     // because scalar GEPs result in better code.
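    // Shorthand example (hypothetical, VF = 4, UF = 1, non-uniform): for a
    // pointer IV %p over i32, each vector iteration emits four scalar GEPs,
    //
    //   %next.gep = getelementptr i32, i32* %base, i64 (%ptr.ind + lane)
    //
    // one per lane 0..3, instead of a single vector-of-pointers GEP.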
4648     for (unsigned Part = 0; Part < UF; ++Part) {
4649       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4650         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4651         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4652         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4653         SclrGep->setName("next.gep");
4654         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4655       }
4656     }
4657     return;
4658   }
4659   }
4660 }
4661 
4662 /// A helper function for checking whether an integer division-related
4663 /// instruction may divide by zero (in which case it must be predicated if
4664 /// executed conditionally in the scalar code).
4665 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4667 /// converted into multiplication, so we will still end up scalarizing
4668 /// the division, but can do so w/o predication.
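/// For example (hypothetical): in "if (c) x = a / b;" the division must stay
/// predicated unless b is a compile-time non-zero constant, since executing
/// it unconditionally could divide by zero.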
4669 static bool mayDivideByZero(Instruction &I) {
4670   assert((I.getOpcode() == Instruction::UDiv ||
4671           I.getOpcode() == Instruction::SDiv ||
4672           I.getOpcode() == Instruction::URem ||
4673           I.getOpcode() == Instruction::SRem) &&
4674          "Unexpected instruction");
4675   Value *Divisor = I.getOperand(1);
4676   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4677   return !CInt || CInt->isZero();
4678 }
4679 
4680 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
4681   switch (I.getOpcode()) {
4682   case Instruction::Br:
4683   case Instruction::PHI:
4684     llvm_unreachable("This instruction is handled by a different recipe.");
4685   case Instruction::GetElementPtr: {
4686     // Construct a vector GEP by widening the operands of the scalar GEP as
4687     // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4688     // results in a vector of pointers when at least one operand of the GEP
4689     // is vector-typed. Thus, to keep the representation compact, we only use
4690     // vector-typed operands for loop-varying values.
4691     auto *GEP = cast<GetElementPtrInst>(&I);
4692 
4693     if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
4694       // If we are vectorizing, but the GEP has only loop-invariant operands,
4695       // the GEP we build (by only using vector-typed operands for
4696       // loop-varying values) would be a scalar pointer. Thus, to ensure we
4697       // produce a vector of pointers, we need to either arbitrarily pick an
4698       // operand to broadcast, or broadcast a clone of the original GEP.
4699       // Here, we broadcast a clone of the original.
4700       //
4701       // TODO: If at some point we decide to scalarize instructions having
4702       //       loop-invariant operands, this special case will no longer be
4703       //       required. We would add the scalarization decision to
4704       //       collectLoopScalars() and teach getVectorValue() to broadcast
4705       //       the lane-zero scalar value.
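      // Shorthand sketch (hypothetical, VF = 4): the splat created below
      // expands to
      //
      //   %ins   = insertelement <4 x i32*> undef, i32* %clone, i32 0
      //   %splat = shufflevector <4 x i32*> %ins, undef, zeroinitializer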
4706       auto *Clone = Builder.Insert(GEP->clone());
4707       for (unsigned Part = 0; Part < UF; ++Part) {
4708         Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4709         VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
4710         addMetadata(EntryPart, GEP);
4711       }
4712     } else {
4713       // If the GEP has at least one loop-varying operand, we are sure to
4714       // produce a vector of pointers. But if we are only unrolling, we want
4715       // to produce a scalar GEP for each unroll part. Thus, the GEP we
4716       // produce with the code below will be scalar (if VF == 1) or vector
4717       // (otherwise). Note that for the unroll-only case, we still maintain
      // values in the vector mapping, as we do for other instructions.
4720       for (unsigned Part = 0; Part < UF; ++Part) {
4721         // The pointer operand of the new GEP. If it's loop-invariant, we
4722         // won't broadcast it.
4723         auto *Ptr =
4724             OrigLoop->isLoopInvariant(GEP->getPointerOperand())
4725                 ? GEP->getPointerOperand()
4726                 : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4727 
4728         // Collect all the indices for the new GEP. If any index is
4729         // loop-invariant, we won't broadcast it.
4730         SmallVector<Value *, 4> Indices;
4731         for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
4732           if (OrigLoop->isLoopInvariant(U.get()))
4733             Indices.push_back(U.get());
4734           else
4735             Indices.push_back(getOrCreateVectorValue(U.get(), Part));
4736         }
4737 
4738         // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4739         // but it should be a vector, otherwise.
4740         auto *NewGEP = GEP->isInBounds()
4741                            ? Builder.CreateInBoundsGEP(Ptr, Indices)
4742                            : Builder.CreateGEP(Ptr, Indices);
4743         assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4744                "NewGEP is not a pointer vector");
4745         VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
4746         addMetadata(NewGEP, GEP);
4747       }
4748     }
4749 
4750     break;
4751   }
4752   case Instruction::UDiv:
4753   case Instruction::SDiv:
4754   case Instruction::SRem:
4755   case Instruction::URem:
4756   case Instruction::Add:
4757   case Instruction::FAdd:
4758   case Instruction::Sub:
4759   case Instruction::FSub:
4760   case Instruction::Mul:
4761   case Instruction::FMul:
4762   case Instruction::FDiv:
4763   case Instruction::FRem:
4764   case Instruction::Shl:
4765   case Instruction::LShr:
4766   case Instruction::AShr:
4767   case Instruction::And:
4768   case Instruction::Or:
4769   case Instruction::Xor: {
4770     // Just widen binops.
4771     auto *BinOp = cast<BinaryOperator>(&I);
4772     setDebugLocFromInst(Builder, BinOp);
4773 
4774     for (unsigned Part = 0; Part < UF; ++Part) {
4775       Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
4776       Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
4777       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
4778 
4779       if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4780         VecOp->copyIRFlags(BinOp);
4781 
4782       // Use this vector value for all users of the original instruction.
4783       VectorLoopValueMap.setVectorValue(&I, Part, V);
4784       addMetadata(V, BinOp);
4785     }
4786 
4787     break;
4788   }
4789   case Instruction::Select: {
4790     // Widen selects.
4791     // If the selector is loop invariant we can create a select
4792     // instruction with a scalar condition. Otherwise, use vector-select.
4793     auto *SE = PSE.getSE();
4794     bool InvariantCond =
4795         SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4796     setDebugLocFromInst(Builder, &I);
4797 
    // The condition can be loop invariant but still defined inside the
    // loop. This means that we can't just use the original 'cond' value.
    // We have to take the 'vectorized' value and pick the first lane.
    // InstCombine will make this a no-op.
4802 
4803     auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4804 
4805     for (unsigned Part = 0; Part < UF; ++Part) {
4806       Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4807       Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4808       Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4809       Value *Sel =
4810           Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4811       VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4812       addMetadata(Sel, &I);
4813     }
4814 
4815     break;
4816   }
4817 
4818   case Instruction::ICmp:
4819   case Instruction::FCmp: {
4820     // Widen compares. Generate vector compares.
4821     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4822     auto *Cmp = dyn_cast<CmpInst>(&I);
4823     setDebugLocFromInst(Builder, Cmp);
4824     for (unsigned Part = 0; Part < UF; ++Part) {
4825       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4826       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4827       Value *C = nullptr;
4828       if (FCmp) {
4829         // Propagate fast math flags.
4830         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4831         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4832         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4833       } else {
4834         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4835       }
4836       VectorLoopValueMap.setVectorValue(&I, Part, C);
4837       addMetadata(C, &I);
4838     }
4839 
4840     break;
4841   }
4842 
4843   case Instruction::ZExt:
4844   case Instruction::SExt:
4845   case Instruction::FPToUI:
4846   case Instruction::FPToSI:
4847   case Instruction::FPExt:
4848   case Instruction::PtrToInt:
4849   case Instruction::IntToPtr:
4850   case Instruction::SIToFP:
4851   case Instruction::UIToFP:
4852   case Instruction::Trunc:
4853   case Instruction::FPTrunc:
4854   case Instruction::BitCast: {
4855     auto *CI = dyn_cast<CastInst>(&I);
4856     setDebugLocFromInst(Builder, CI);
4857 
4858     /// Vectorize casts.
4859     Type *DestTy =
4860         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4861 
4862     for (unsigned Part = 0; Part < UF; ++Part) {
4863       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4864       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4865       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4866       addMetadata(Cast, &I);
4867     }
4868     break;
4869   }
4870 
4871   case Instruction::Call: {
4872     // Ignore dbg intrinsics.
4873     if (isa<DbgInfoIntrinsic>(I))
4874       break;
4875     setDebugLocFromInst(Builder, &I);
4876 
4877     Module *M = I.getParent()->getParent()->getParent();
4878     auto *CI = cast<CallInst>(&I);
4879 
4880     StringRef FnName = CI->getCalledFunction()->getName();
4881     Function *F = CI->getCalledFunction();
4882     Type *RetTy = ToVectorTy(CI->getType(), VF);
4883     SmallVector<Type *, 4> Tys;
4884     for (Value *ArgOperand : CI->arg_operands())
4885       Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4886 
4887     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4888 
    // The flag indicates whether to use an intrinsic or an ordinary call for
    // the vectorized version of the instruction, i.e., whether the intrinsic
    // call is cheaper than the library call.
4892     bool NeedToScalarize;
4893     unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4894     bool UseVectorIntrinsic =
4895         ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4896     assert((UseVectorIntrinsic || !NeedToScalarize) &&
4897            "Instruction should be scalarized elsewhere.");
4898 
4899     for (unsigned Part = 0; Part < UF; ++Part) {
4900       SmallVector<Value *, 4> Args;
4901       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4902         Value *Arg = CI->getArgOperand(i);
4903         // Some intrinsics have a scalar argument - don't replace it with a
4904         // vector.
4905         if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
4906           Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
4907         Args.push_back(Arg);
4908       }
4909 
4910       Function *VectorF;
4911       if (UseVectorIntrinsic) {
4912         // Use vector version of the intrinsic.
4913         Type *TysForDecl[] = {CI->getType()};
4914         if (VF > 1)
4915           TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4916         VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4917       } else {
4918         // Use vector version of the library call.
4919         StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4920         assert(!VFnName.empty() && "Vector function name is empty.");
4921         VectorF = M->getFunction(VFnName);
4922         if (!VectorF) {
4923           // Generate a declaration
4924           FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4925           VectorF =
4926               Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4927           VectorF->copyAttributesFrom(F);
4928         }
4929       }
4930       assert(VectorF && "Can't create vector function.");
4931 
4932       SmallVector<OperandBundleDef, 1> OpBundles;
4933       CI->getOperandBundlesAsDefs(OpBundles);
4934       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4935 
4936       if (isa<FPMathOperator>(V))
4937         V->copyFastMathFlags(CI);
4938 
4939       VectorLoopValueMap.setVectorValue(&I, Part, V);
4940       addMetadata(V, &I);
4941     }
4942 
4943     break;
4944   }
4945 
4946   default:
4947     // This instruction is not vectorized by simple widening.
4948     DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4949     llvm_unreachable("Unhandled instruction!");
4950   } // end of switch.
4951 }
4952 
4953 void InnerLoopVectorizer::updateAnalysis() {
4954   // Forget the original basic block.
4955   PSE.getSE()->forgetLoop(OrigLoop);
4956 
4957   // Update the dominator tree information.
4958   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4959          "Entry does not dominate exit.");
4960 
4961   DT->addNewBlock(LoopMiddleBlock,
4962                   LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4963   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4964   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4965   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4966   DEBUG(DT->verifyDomTree());
4967 }
4968 
4969 /// \brief Check whether it is safe to if-convert this phi node.
4970 ///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
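/// For example (hypothetical), an incoming value such as the constant
/// expression "sdiv (i32 1, i32 0)" could trap if the select we form for the
/// phi evaluated it unconditionally.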
4973 static bool canIfConvertPHINodes(BasicBlock *BB) {
4974   for (PHINode &Phi : BB->phis()) {
4975     for (Value *V : Phi.incoming_values())
4976       if (auto *C = dyn_cast<Constant>(V))
4977         if (C->canTrap())
4978           return false;
4979   }
4980   return true;
4981 }
4982 
4983 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4984   if (!EnableIfConversion) {
4985     ORE->emit(createMissedAnalysis("IfConversionDisabled")
4986               << "if-conversion is disabled");
4987     return false;
4988   }
4989 
4990   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4991 
  // The set of pointers that we can safely read from and write to.
  SmallPtrSet<Value *, 8> SafePointers;
4994 
4995   // Collect safe addresses.
4996   for (BasicBlock *BB : TheLoop->blocks()) {
4997     if (blockNeedsPredication(BB))
4998       continue;
4999 
5000     for (Instruction &I : *BB)
5001       if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
5003   }
5004 
5005   // Collect the blocks that need predication.
5006   BasicBlock *Header = TheLoop->getHeader();
5007   for (BasicBlock *BB : TheLoop->blocks()) {
5008     // We don't support switch statements inside loops.
5009     if (!isa<BranchInst>(BB->getTerminator())) {
5010       ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5011                 << "loop contains a switch statement");
5012       return false;
5013     }
5014 
5015     // We must be able to predicate all blocks that need to be predicated.
5016     if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
5018         ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5019                   << "control flow cannot be substituted for a select");
5020         return false;
5021       }
5022     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5023       ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5024                 << "control flow cannot be substituted for a select");
5025       return false;
5026     }
5027   }
5028 
5029   // We can if-convert this loop.
5030   return true;
5031 }
5032 
5033 bool LoopVectorizationLegality::canVectorize() {
5034   // Store the result and return it at the end instead of exiting early, in case
5035   // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
5036   bool Result = true;
5037 
5038   bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
5039   // We must have a loop in canonical form. Loops with indirectbr in them cannot
5040   // be canonicalized.
5041   if (!TheLoop->getLoopPreheader()) {
5042     DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n");
5043     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5044               << "loop control flow is not understood by vectorizer");
5045     if (DoExtraAnalysis)
5046       Result = false;
5047     else
5048       return false;
5049   }
5050 
  // FIXME: This code is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
5053   //
5054   // We can only vectorize innermost loops.
5055   if (!TheLoop->empty()) {
5056     ORE->emit(createMissedAnalysis("NotInnermostLoop")
5057               << "loop is not the innermost loop");
5058     if (DoExtraAnalysis)
5059       Result = false;
5060     else
5061       return false;
5062   }
5063 
5064   // We must have a single backedge.
5065   if (TheLoop->getNumBackEdges() != 1) {
5066     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5067               << "loop control flow is not understood by vectorizer");
5068     if (DoExtraAnalysis)
5069       Result = false;
5070     else
5071       return false;
5072   }
5073 
5074   // We must have a single exiting block.
5075   if (!TheLoop->getExitingBlock()) {
5076     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5077               << "loop control flow is not understood by vectorizer");
5078     if (DoExtraAnalysis)
5079       Result = false;
5080     else
5081       return false;
5082   }
5083 
  // We only handle bottom-tested loops, i.e., loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
5087   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5088     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5089               << "loop control flow is not understood by vectorizer");
5090     if (DoExtraAnalysis)
5091       Result = false;
5092     else
5093       return false;
5094   }
5095 
  // Report the loop we are about to analyze.
5097   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5098                << '\n');
5099 
5100   // Check if we can if-convert non-single-bb loops.
5101   unsigned NumBlocks = TheLoop->getNumBlocks();
5102   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5103     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5104     if (DoExtraAnalysis)
5105       Result = false;
5106     else
5107       return false;
5108   }
5109 
5110   // Check if we can vectorize the instructions and CFG in this loop.
5111   if (!canVectorizeInstrs()) {
5112     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5113     if (DoExtraAnalysis)
5114       Result = false;
5115     else
5116       return false;
5117   }
5118 
5119   // Go over each instruction and look at memory deps.
5120   if (!canVectorizeMemory()) {
5121     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5122     if (DoExtraAnalysis)
5123       Result = false;
5124     else
5125       return false;
5126   }
5127 
5128   DEBUG(dbgs() << "LV: We can vectorize this loop"
5129                << (LAI->getRuntimePointerChecking()->Need
5130                        ? " (with a runtime bound check)"
5131                        : "")
5132                << "!\n");
5133 
5134   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5135 
5136   // If an override option has been passed in for interleaved accesses, use it.
5137   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5138     UseInterleaved = EnableInterleavedMemAccesses;
5139 
5140   // Analyze interleaved memory accesses.
5141   if (UseInterleaved)
5142     InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5143 
5144   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5145   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5146     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5147 
5148   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5149     ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5150               << "Too many SCEV assumptions need to be made and checked "
5151               << "at runtime");
5152     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5153     if (DoExtraAnalysis)
5154       Result = false;
5155     else
5156       return false;
5157   }
5158 
5159   // Okay! We've done all the tests. If any have failed, return false. Otherwise
5160   // we can vectorize, and at this point we don't have any other mem analysis
5161   // which may limit our maximum vectorization factor, so just return true with
5162   // no restrictions.
5163   return Result;
5164 }
5165 
5166 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5167   if (Ty->isPointerTy())
5168     return DL.getIntPtrType(Ty);
5169 
  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count, so work around this by widening the type.
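  // For example (illustrative), an i8 induction counting 200 iterations would
  // wrap around, so we compute the trip count in i32 instead.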
5172   if (Ty->getScalarSizeInBits() < 32)
5173     return Type::getInt32Ty(Ty->getContext());
5174 
5175   return Ty;
5176 }
5177 
5178 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5179   Ty0 = convertPointerToIntegerType(DL, Ty0);
5180   Ty1 = convertPointerToIntegerType(DL, Ty1);
5181   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5182     return Ty0;
5183   return Ty1;
5184 }
5185 
/// \brief Check whether the instruction has users outside the loop and is not
/// an identified reduction or induction variable.
5188 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5189                                SmallPtrSetImpl<Value *> &AllowedExit) {
5190   // Reduction and Induction instructions are allowed to have exit users. All
5191   // other instructions must not have external users.
5192   if (!AllowedExit.count(Inst))
5193     // Check that all of the users of the loop are inside the BB.
5194     for (User *U : Inst->users()) {
5195       Instruction *UI = cast<Instruction>(U);
5196       // This user may be a reduction exit value.
5197       if (!TheLoop->contains(UI)) {
5198         DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
5199         return true;
5200       }
5201     }
5202   return false;
5203 }
5204 
5205 void LoopVectorizationLegality::addInductionPhi(
5206     PHINode *Phi, const InductionDescriptor &ID,
5207     SmallPtrSetImpl<Value *> &AllowedExit) {
5208   Inductions[Phi] = ID;
5209 
  // In case this induction also comes with casts that we know we can ignore
  // in the vectorized loop body, record them here. All casts could be recorded
  // here for ignoring, but it suffices to record only the first (as it is the
  // only one that may be used outside the cast sequence).
5214   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
5215   if (!Casts.empty())
5216     InductionCastsToIgnore.insert(*Casts.begin());
5217 
5218   Type *PhiTy = Phi->getType();
5219   const DataLayout &DL = Phi->getModule()->getDataLayout();
5220 
5221   // Get the widest type.
5222   if (!PhiTy->isFloatingPointTy()) {
5223     if (!WidestIndTy)
5224       WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5225     else
5226       WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5227   }
5228 
  // Integer inductions are special because we only allow one primary IV.
5230   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5231       ID.getConstIntStepValue() &&
5232       ID.getConstIntStepValue()->isOne() &&
5233       isa<Constant>(ID.getStartValue()) &&
5234       cast<Constant>(ID.getStartValue())->isNullValue()) {
5235 
5236     // Use the phi node with the widest type as induction. Use the last
5237     // one if there are multiple (no good reason for doing this other
5238     // than it is expedient). We've checked that it begins at zero and
5239     // steps by one, so this is a canonical induction variable.
5240     if (!PrimaryInduction || PhiTy == WidestIndTy)
5241       PrimaryInduction = Phi;
5242   }
5243 
5244   // Both the PHI node itself, and the "post-increment" value feeding
5245   // back into the PHI node may have external users.
5246   // We can allow those uses, except if the SCEVs we have for them rely
5247   // on predicates that only hold within the loop, since allowing the exit
5248   // currently means re-using this SCEV outside the loop.
5249   if (PSE.getUnionPredicate().isAlwaysTrue()) {
5250     AllowedExit.insert(Phi);
5251     AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5252   }
5253 
5254   DEBUG(dbgs() << "LV: Found an induction variable.\n");
5255 }
5256 
5257 bool LoopVectorizationLegality::canVectorizeInstrs() {
5258   BasicBlock *Header = TheLoop->getHeader();
5259 
5260   // Look for the attribute signaling the absence of NaNs.
5261   Function &F = *Header->getParent();
5262   HasFunNoNaNAttr =
5263       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5264 
5265   // For each block in the loop.
5266   for (BasicBlock *BB : TheLoop->blocks()) {
5267     // Scan the instructions in the block and look for hazards.
5268     for (Instruction &I : *BB) {
5269       if (auto *Phi = dyn_cast<PHINode>(&I)) {
5270         Type *PhiTy = Phi->getType();
5271         // Check that this PHI type is allowed.
5272         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5273             !PhiTy->isPointerTy()) {
5274           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5275                     << "loop control flow is not understood by vectorizer");
5276           DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
5277           return false;
5278         }
5279 
5280         // If this PHINode is not in the header block, then we know that we
5281         // can convert it to select during if-conversion. No need to check if
5282         // the PHIs in this block are induction or reduction variables.
5283         if (BB != Header) {
5284           // Check that this instruction has no outside users or is an
5285           // identified reduction value with an outside user.
5286           if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5287             continue;
5288           ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5289                     << "value could not be identified as "
5290                        "an induction or reduction variable");
5291           return false;
5292         }
5293 
5294         // We only allow if-converted PHIs with exactly two incoming values.
5295         if (Phi->getNumIncomingValues() != 2) {
5296           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5297                     << "control flow not understood by vectorizer");
5298           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5299           return false;
5300         }
5301 
5302         RecurrenceDescriptor RedDes;
5303         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5304           if (RedDes.hasUnsafeAlgebra())
5305             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5306           AllowedExit.insert(RedDes.getLoopExitInstr());
5307           Reductions[Phi] = RedDes;
5308           continue;
5309         }
5310 
5311         InductionDescriptor ID;
5312         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5313           addInductionPhi(Phi, ID, AllowedExit);
5314           if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5315             Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5316           continue;
5317         }
5318 
5319         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
5320                                                          SinkAfter, DT)) {
5321           FirstOrderRecurrences.insert(Phi);
5322           continue;
5323         }
5324 
        // As a last resort, coerce the PHI to an AddRec expression
        // and retry classifying it as an induction PHI.
5327         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5328           addInductionPhi(Phi, ID, AllowedExit);
5329           continue;
5330         }
5331 
5332         ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5333                   << "value that could not be identified as "
5334                      "reduction is used outside the loop");
5335         DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5336         return false;
5337       } // end of PHI handling
5338 
5339       // We handle calls that:
5340       //   * Are debug info intrinsics.
5341       //   * Have a mapping to an IR intrinsic.
5342       //   * Have a vector version available.
5343       auto *CI = dyn_cast<CallInst>(&I);
5344       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5345           !isa<DbgInfoIntrinsic>(CI) &&
5346           !(CI->getCalledFunction() && TLI &&
5347             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5348         ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5349                   << "call instruction cannot be vectorized");
5350         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5351         return false;
5352       }
5353 
      // Intrinsics such as powi, cttz, and ctlz are legal to vectorize only if
      // the second argument is loop invariant (i.e., the same on every
      // iteration).
5356       if (CI && hasVectorInstrinsicScalarOpd(
5357                     getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5358         auto *SE = PSE.getSE();
5359         if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5360           ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5361                     << "intrinsic instruction cannot be vectorized");
5362           DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5363           return false;
5364         }
5365       }
5366 
5367       // Check that the instruction return type is vectorizable.
5368       // Also, we can't vectorize extractelement instructions.
5369       if ((!VectorType::isValidElementType(I.getType()) &&
5370            !I.getType()->isVoidTy()) ||
5371           isa<ExtractElementInst>(I)) {
5372         ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5373                   << "instruction return type cannot be vectorized");
5374         DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5375         return false;
5376       }
5377 
5378       // Check that the stored type is vectorizable.
5379       if (auto *ST = dyn_cast<StoreInst>(&I)) {
5380         Type *T = ST->getValueOperand()->getType();
5381         if (!VectorType::isValidElementType(T)) {
5382           ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5383                     << "store instruction cannot be vectorized");
5384           return false;
5385         }
5386 
5387         // FP instructions can allow unsafe algebra, thus vectorizable by
5388         // non-IEEE-754 compliant SIMD units.
5389         // This applies to floating-point math operations and calls, not memory
5390         // operations, shuffles, or casts, as they don't change precision or
5391         // semantics.
5392       } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5393                  !I.isFast()) {
5394         DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
5395         Hints->setPotentiallyUnsafe();
5396       }
5397 
5398       // Reduction instructions are allowed to have exit users.
5399       // All other instructions must not have external users.
5400       if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5401         ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5402                   << "value cannot be used outside the loop");
5403         return false;
5404       }
5405     } // next instr.
5406   }
5407 
5408   if (!PrimaryInduction) {
5409     DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
5410     if (Inductions.empty()) {
5411       ORE->emit(createMissedAnalysis("NoInductionVariable")
5412                 << "loop induction variable could not be identified");
5413       return false;
5414     }
5415   }
5416 
5417   // Now we know the widest induction type, check if our found induction
5418   // is the same size. If it's not, unset it here and InnerLoopVectorizer
5419   // will create another.
5420   if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
5421     PrimaryInduction = nullptr;
5422 
5423   return true;
5424 }
5425 
5426 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
5427   // We should not collect Scalars more than once per VF. Right now, this
5428   // function is called from collectUniformsAndScalars(), which already does
5429   // this check. Collecting Scalars for VF=1 does not make any sense.
5430   assert(VF >= 2 && !Scalars.count(VF) &&
5431          "This function should not be visited twice for the same VF");
5432 
5433   SmallSetVector<Instruction *, 8> Worklist;
5434 
5435   // These sets are used to seed the analysis with pointers used by memory
5436   // accesses that will remain scalar.
5437   SmallSetVector<Instruction *, 8> ScalarPtrs;
5438   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5439 
5440   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5441   // The pointer operands of loads and stores will be scalar as long as the
5442   // memory access is not a gather or scatter operation. The value operand of a
5443   // store will remain scalar if the store is scalarized.
5444   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5445     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5446     assert(WideningDecision != CM_Unknown &&
5447            "Widening decision should be ready at this moment");
5448     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5449       if (Ptr == Store->getValueOperand())
5450         return WideningDecision == CM_Scalarize;
5451     assert(Ptr == getPointerOperand(MemAccess) &&
5452            "Ptr is neither a value or pointer operand");
5453     return WideningDecision != CM_GatherScatter;
5454   };
5455 
5456   // A helper that returns true if the given value is a bitcast or
5457   // getelementptr instruction contained in the loop.
5458   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5459     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5460             isa<GetElementPtrInst>(V)) &&
5461            !TheLoop->isLoopInvariant(V);
5462   };
5463 
5464   // A helper that evaluates a memory access's use of a pointer. If the use
5465   // will be a scalar use, and the pointer is only used by memory accesses, we
5466   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
5467   // PossibleNonScalarPtrs.
5468   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5469     // We only care about bitcast and getelementptr instructions contained in
5470     // the loop.
5471     if (!isLoopVaryingBitCastOrGEP(Ptr))
5472       return;
5473 
5474     // If the pointer has already been identified as scalar (e.g., if it was
5475     // also identified as uniform), there's nothing to do.
5476     auto *I = cast<Instruction>(Ptr);
5477     if (Worklist.count(I))
5478       return;
5479 
5480     // If the use of the pointer will be a scalar use, and all users of the
5481     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5482     // place the pointer in PossibleNonScalarPtrs.
5483     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5484           return isa<LoadInst>(U) || isa<StoreInst>(U);
5485         }))
5486       ScalarPtrs.insert(I);
5487     else
5488       PossibleNonScalarPtrs.insert(I);
5489   };
5490 
5491   // We seed the scalars analysis with three classes of instructions: (1)
5492   // instructions marked uniform-after-vectorization, (2) bitcast and
5493   // getelementptr instructions used by memory accesses requiring a scalar use,
5494   // and (3) pointer induction variables and their update instructions (we
5495   // currently only scalarize these).
5496   //
5497   // (1) Add to the worklist all instructions that have been identified as
5498   // uniform-after-vectorization.
5499   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5500 
5501   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5502   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5504   // scatter operation. The value operand of a store will remain scalar if the
5505   // store is scalarized.
5506   for (auto *BB : TheLoop->blocks())
5507     for (auto &I : *BB) {
5508       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5509         evaluatePtrUse(Load, Load->getPointerOperand());
5510       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5511         evaluatePtrUse(Store, Store->getPointerOperand());
5512         evaluatePtrUse(Store, Store->getValueOperand());
5513       }
5514     }
5515   for (auto *I : ScalarPtrs)
5516     if (!PossibleNonScalarPtrs.count(I)) {
5517       DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5518       Worklist.insert(I);
5519     }
5520 
5521   // (3) Add to the worklist all pointer induction variables and their update
5522   // instructions.
5523   //
5524   // TODO: Once we are able to vectorize pointer induction variables we should
5525   //       no longer insert them into the worklist here.
5526   auto *Latch = TheLoop->getLoopLatch();
5527   for (auto &Induction : *Legal->getInductionVars()) {
5528     auto *Ind = Induction.first;
5529     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5530     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
5531       continue;
5532     Worklist.insert(Ind);
5533     Worklist.insert(IndUpdate);
5534     DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5535     DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5536   }
5537 
5538   // Insert the forced scalars.
5539   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5540   // induction variable when the PHI user is scalarized.
5541   if (ForcedScalars.count(VF))
5542     for (auto *I : ForcedScalars.find(VF)->second)
5543       Worklist.insert(I);
5544 
5545   // Expand the worklist by looking through any bitcasts and getelementptr
5546   // instructions we've already identified as scalar. This is similar to the
5547   // expansion step in collectLoopUniforms(); however, here we're only
5548   // expanding to include additional bitcasts and getelementptr instructions.
5549   unsigned Idx = 0;
5550   while (Idx != Worklist.size()) {
5551     Instruction *Dst = Worklist[Idx++];
5552     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5553       continue;
5554     auto *Src = cast<Instruction>(Dst->getOperand(0));
5555     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5556           auto *J = cast<Instruction>(U);
5557           return !TheLoop->contains(J) || Worklist.count(J) ||
5558                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5559                   isScalarUse(J, Src));
5560         })) {
5561       Worklist.insert(Src);
5562       DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5563     }
5564   }
5565 
5566   // An induction variable will remain scalar if all users of the induction
5567   // variable and induction variable update remain scalar.
5568   for (auto &Induction : *Legal->getInductionVars()) {
5569     auto *Ind = Induction.first;
5570     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5571 
5572     // We already considered pointer induction variables, so there's no reason
5573     // to look at their users again.
5574     //
5575     // TODO: Once we are able to vectorize pointer induction variables we
5576     //       should no longer skip over them here.
5577     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
5578       continue;
5579 
5580     // Determine if all users of the induction variable are scalar after
5581     // vectorization.
5582     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5583       auto *I = cast<Instruction>(U);
5584       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5585     });
5586     if (!ScalarInd)
5587       continue;
5588 
5589     // Determine if all users of the induction variable update instruction are
5590     // scalar after vectorization.
5591     auto ScalarIndUpdate =
5592         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5593           auto *I = cast<Instruction>(U);
5594           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5595         });
5596     if (!ScalarIndUpdate)
5597       continue;
5598 
5599     // The induction variable and its update instruction will remain scalar.
5600     Worklist.insert(Ind);
5601     Worklist.insert(IndUpdate);
5602     DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5603     DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5604   }
5605 
5606   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5607 }
5608 
5609 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5610   if (!blockNeedsPredication(I->getParent()))
5611     return false;
  switch (I->getOpcode()) {
5613   default:
5614     break;
5615   case Instruction::Store:
5616     return !isMaskRequired(I);
5617   case Instruction::UDiv:
5618   case Instruction::SDiv:
5619   case Instruction::SRem:
5620   case Instruction::URem:
5621     return mayDivideByZero(*I);
5622   }
5623   return false;
5624 }
5625 
5626 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5627                                                               unsigned VF) {
5628   // Get and ensure we have a valid memory instruction.
5629   LoadInst *LI = dyn_cast<LoadInst>(I);
5630   StoreInst *SI = dyn_cast<StoreInst>(I);
5631   assert((LI || SI) && "Invalid memory instruction");
5632 
5633   auto *Ptr = getPointerOperand(I);
5634 
5635   // In order to be widened, the pointer should be consecutive, first of all.
5636   if (!isConsecutivePtr(Ptr))
5637     return false;
5638 
5639   // If the instruction is a store located in a predicated block, it will be
5640   // scalarized.
5641   if (isScalarWithPredication(I))
5642     return false;
5643 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
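  // For example (illustrative), x86_fp80 has an 80-bit type size but a larger
  // allocation size, so a widened access would touch padding bytes.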
5646   auto &DL = I->getModule()->getDataLayout();
5647   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5648   if (hasIrregularType(ScalarTy, DL, VF))
5649     return false;
5650 
5651   return true;
5652 }
5653 
5654 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
5655   // We should not collect Uniforms more than once per VF. Right now,
5656   // this function is called from collectUniformsAndScalars(), which
5657   // already does this check. Collecting Uniforms for VF=1 does not make any
5658   // sense.
5659 
5660   assert(VF >= 2 && !Uniforms.count(VF) &&
5661          "This function should not be visited twice for the same VF");
5662 
  // Visit the list of Uniforms. If we find no uniform values, we won't
  // analyze again; Uniforms.count(VF) will return 1.
5665   Uniforms[VF].clear();
5666 
5667   // We now know that the loop is vectorizable!
5668   // Collect instructions inside the loop that will remain uniform after
5669   // vectorization.
5670 
5671   // Global values, params and instructions outside of current loop are out of
5672   // scope.
5673   auto isOutOfScope = [&](Value *V) -> bool {
5674     Instruction *I = dyn_cast<Instruction>(V);
5675     return (!I || !TheLoop->contains(I));
5676   };
5677 
5678   SetVector<Instruction *> Worklist;
5679   BasicBlock *Latch = TheLoop->getLoopLatch();
5680 
5681   // Start with the conditional branch. If the branch condition is an
5682   // instruction contained in the loop that is only used by the branch, it is
5683   // uniform.
5684   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5685   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5686     Worklist.insert(Cmp);
5687     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5688   }
5689 
5690   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5691   // are pointers that are treated like consecutive pointers during
5692   // vectorization. The pointer operands of interleaved accesses are an
5693   // example.
5694   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5695 
5696   // Holds pointer operands of instructions that are possibly non-uniform.
5697   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5698 
5699   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5700     InstWidening WideningDecision = getWideningDecision(I, VF);
5701     assert(WideningDecision != CM_Unknown &&
5702            "Widening decision should be ready at this moment");
5703 
5704     return (WideningDecision == CM_Widen ||
5705             WideningDecision == CM_Widen_Reverse ||
5706             WideningDecision == CM_Interleave);
5707   };
5708   // Iterate over the instructions in the loop, and collect all
5709   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5710   // that a consecutive-like pointer operand will be scalarized, we collect it
5711   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5712   // getelementptr instruction can be used by both vectorized and scalarized
5713   // memory instructions. For example, if a loop loads and stores from the same
5714   // location, but the store is conditional, the store will be scalarized, and
5715   // the getelementptr won't remain uniform.
5716   for (auto *BB : TheLoop->blocks())
5717     for (auto &I : *BB) {
5718       // If there's no pointer operand, there's nothing to do.
5719       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5720       if (!Ptr)
5721         continue;
5722 
5723       // True if all users of Ptr are memory accesses that have Ptr as their
5724       // pointer operand.
5725       auto UsersAreMemAccesses =
5726           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
5727             return getPointerOperand(U) == Ptr;
5728           });
5729 
5730       // Ensure the memory instruction will not be scalarized or used by
5731       // gather/scatter, making its pointer operand non-uniform. If the pointer
5732       // operand is used by any instruction other than a memory access, we
5733       // conservatively assume the pointer operand may be non-uniform.
5734       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5735         PossibleNonUniformPtrs.insert(Ptr);
5736 
5737       // If the memory instruction will be vectorized and its pointer operand
5738       // is consecutive-like, or interleaving - the pointer operand should
5739       // remain uniform.
5740       else
5741         ConsecutiveLikePtrs.insert(Ptr);
5742     }
5743 
5744   // Add to the Worklist all consecutive and consecutive-like pointers that
5745   // aren't also identified as possibly non-uniform.
5746   for (auto *V : ConsecutiveLikePtrs)
5747     if (!PossibleNonUniformPtrs.count(V)) {
5748       DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5749       Worklist.insert(V);
5750     }
5751 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
5756   unsigned idx = 0;
5757   while (idx != Worklist.size()) {
5758     Instruction *I = Worklist[idx++];
5759 
5760     for (auto OV : I->operand_values()) {
5761       if (isOutOfScope(OV))
5762         continue;
5763       auto *OI = cast<Instruction>(OV);
5764       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5765             auto *J = cast<Instruction>(U);
5766             return !TheLoop->contains(J) || Worklist.count(J) ||
5767                    (OI == getPointerOperand(J) && isUniformDecision(J, VF));
5768           })) {
5769         Worklist.insert(OI);
5770         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5771       }
5772     }
5773   }
5774 
5775   // Returns true if Ptr is the pointer operand of a memory access instruction
5776   // I, and I is known to not require scalarization.
5777   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5778     return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5779   };
5780 
5781   // For an instruction to be added into Worklist above, all its users inside
5782   // the loop should also be in Worklist. However, this condition cannot be
5783   // true for phi nodes that form a cyclic dependence. We must process phi
5784   // nodes separately. An induction variable will remain uniform if all users
5785   // of the induction variable and induction variable update remain uniform.
5786   // The code below handles both pointer and non-pointer induction variables.
5787   for (auto &Induction : *Legal->getInductionVars()) {
5788     auto *Ind = Induction.first;
5789     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5790 
5791     // Determine if all users of the induction variable are uniform after
5792     // vectorization.
5793     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5794       auto *I = cast<Instruction>(U);
5795       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5796              isVectorizedMemAccessUse(I, Ind);
5797     });
5798     if (!UniformInd)
5799       continue;
5800 
5801     // Determine if all users of the induction variable update instruction are
5802     // uniform after vectorization.
5803     auto UniformIndUpdate =
5804         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5805           auto *I = cast<Instruction>(U);
5806           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5807                  isVectorizedMemAccessUse(I, IndUpdate);
5808         });
5809     if (!UniformIndUpdate)
5810       continue;
5811 
5812     // The induction variable and its update instruction will remain uniform.
5813     Worklist.insert(Ind);
5814     Worklist.insert(IndUpdate);
5815     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
5816     DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
5817   }
5818 
5819   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5820 }
5821 
5822 bool LoopVectorizationLegality::canVectorizeMemory() {
5823   LAI = &(*GetLAA)(*TheLoop);
5824   InterleaveInfo.setLAI(LAI);
5825   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
5826   if (LAR) {
5827     ORE->emit([&]() {
5828       return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
5829                                         "loop not vectorized: ", *LAR);
5830     });
5831   }
5832   if (!LAI->canVectorizeMemory())
5833     return false;
5834 
5835   if (LAI->hasStoreToLoopInvariantAddress()) {
5836     ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
5837               << "write to a loop invariant address could not be vectorized");
5838     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5839     return false;
5840   }
5841 
5842   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5843   PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5844 
5845   return true;
5846 }
5847 
5848 bool LoopVectorizationLegality::isInductionPhi(const Value *V) {
5849   Value *In0 = const_cast<Value *>(V);
5850   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5851   if (!PN)
5852     return false;
5853 
5854   return Inductions.count(PN);
5855 }
5856 
5857 bool LoopVectorizationLegality::isCastedInductionVariable(const Value *V) {
5858   auto *Inst = dyn_cast<Instruction>(V);
5859   return (Inst && InductionCastsToIgnore.count(Inst));
5860 }
5861 
5862 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5863   return isInductionPhi(V) || isCastedInductionVariable(V);
5864 }
5865 
5866 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5867   return FirstOrderRecurrences.count(Phi);
5868 }
5869 
5870 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5871   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5872 }
5873 
5874 bool LoopVectorizationLegality::blockCanBePredicated(
5875     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5876   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5877 
5878   for (Instruction &I : *BB) {
5879     // Check that we don't have a constant expression that can trap as operand.
5880     for (Value *Operand : I.operands()) {
5881       if (auto *C = dyn_cast<Constant>(Operand))
5882         if (C->canTrap())
5883           return false;
5884     }
5885     // We might be able to hoist the load.
5886     if (I.mayReadFromMemory()) {
5887       auto *LI = dyn_cast<LoadInst>(&I);
5888       if (!LI)
5889         return false;
5890       if (!SafePtrs.count(LI->getPointerOperand())) {
5891         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5892             isLegalMaskedGather(LI->getType())) {
5893           MaskedOp.insert(LI);
5894           continue;
5895         }
5896         // !llvm.mem.parallel_loop_access implies if-conversion safety.
5897         if (IsAnnotatedParallel)
5898           continue;
5899         return false;
5900       }
5901     }
5902 
5903     if (I.mayWriteToMemory()) {
5904       auto *SI = dyn_cast<StoreInst>(&I);
5905       // We only support predication of stores in basic blocks with one
5906       // predecessor.
5907       if (!SI)
5908         return false;
5909 
5910       // Build a masked store if it is legal for the target.
5911       if (isLegalMaskedStore(SI->getValueOperand()->getType(),
5912                              SI->getPointerOperand()) ||
5913           isLegalMaskedScatter(SI->getValueOperand()->getType())) {
5914         MaskedOp.insert(SI);
5915         continue;
5916       }
5917 
5918       bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
5919       bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
5920 
5921       if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
5922           !isSinglePredecessor)
5923         return false;
5924     }
5925     if (I.mayThrow())
5926       return false;
5927   }
5928 
5929   return true;
5930 }
5931 
5932 void InterleavedAccessInfo::collectConstStrideAccesses(
5933     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
5934     const ValueToValueMap &Strides) {
5935   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
5936 
5937   // Since it's desired that the load/store instructions be maintained in
5938   // "program order" for the interleaved access analysis, we have to visit the
5939   // blocks in the loop in reverse postorder (i.e., in a topological order).
5940   // Such an ordering will ensure that any load/store that may be executed
5941   // before a second load/store will precede the second load/store in
5942   // AccessStrideInfo.
5943   LoopBlocksDFS DFS(TheLoop);
5944   DFS.perform(LI);
5945   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
5946     for (auto &I : *BB) {
5947       auto *LI = dyn_cast<LoadInst>(&I);
5948       auto *SI = dyn_cast<StoreInst>(&I);
5949       if (!LI && !SI)
5950         continue;
5951 
5952       Value *Ptr = getPointerOperand(&I);
5953       // We don't check wrapping here because we don't know yet if Ptr will be
5954       // part of a full group or a group with gaps. Checking wrapping for all
5955       // pointers (even those that end up in groups with no gaps) will be overly
5956       // conservative. For full groups, wrapping should be ok since if we would
5957       // wrap around the address space we would do a memory access at nullptr
5958       // even without the transformation. The wrapping checks are therefore
5959       // deferred until after we've formed the interleaved groups.
5960       int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
5961                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
5962 
5963       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
5964       PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
5965       uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
5966 
5967       // An alignment of 0 means target ABI alignment.
5968       unsigned Align = getMemInstAlignment(&I);
5969       if (!Align)
5970         Align = DL.getABITypeAlignment(PtrTy->getElementType());
5971 
5972       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
5973     }
5974 }
5975 
5976 // Analyze interleaved accesses and collect them into interleaved load and
5977 // store groups.
5978 //
5979 // When generating code for an interleaved load group, we effectively hoist all
5980 // loads in the group to the location of the first load in program order. When
5981 // generating code for an interleaved store group, we sink all stores to the
5982 // location of the last store. This code motion can change the order of load
5983 // and store instructions and may break dependences.
5984 //
5985 // The code generation strategy mentioned above ensures that we won't violate
5986 // any write-after-read (WAR) dependences.
5987 //
5988 // E.g., for the WAR dependence:  a = A[i];      // (1)
5989 //                                A[i] = b;      // (2)
5990 //
5991 // The store group of (2) is always inserted at or below (2), and the load
5992 // group of (1) is always inserted at or above (1). Thus, the instructions will
5993 // never be reordered. All other dependences are checked to ensure the
5994 // correctness of the instruction reordering.
5995 //
5996 // The algorithm visits all memory accesses in the loop in bottom-up program
5997 // order. Program order is established by traversing the blocks in the loop in
5998 // reverse postorder when collecting the accesses.
5999 //
6000 // We visit the memory accesses in bottom-up order because it can simplify the
6001 // construction of store groups in the presence of write-after-write (WAW)
6002 // dependences.
6003 //
6004 // E.g., for the WAW dependence:  A[i] = a;      // (1)
6005 //                                A[i] = b;      // (2)
6006 //                                A[i + 1] = c;  // (3)
6007 //
6008 // We will first create a store group with (3) and (2). (1) can't be added to
6009 // this group because it and (2) are dependent. However, (1) can be grouped
6010 // with other accesses that may precede it in program order. Note that a
6011 // bottom-up order does not imply that WAW dependences should not be checked.
6012 void InterleavedAccessInfo::analyzeInterleaving(
6013     const ValueToValueMap &Strides) {
6014   DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
6015 
6016   // Holds all accesses with a constant stride.
6017   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
6018   collectConstStrideAccesses(AccessStrideInfo, Strides);
6019 
6020   if (AccessStrideInfo.empty())
6021     return;
6022 
6023   // Collect the dependences in the loop.
6024   collectDependences();
6025 
6026   // Holds all interleaved store groups temporarily.
6027   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
6028   // Holds all interleaved load groups temporarily.
6029   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
6030 
6031   // Search in bottom-up program order for pairs of accesses (A and B) that can
6032   // form interleaved load or store groups. In the algorithm below, access A
6033   // precedes access B in program order. We initialize a group for B in the
6034   // outer loop of the algorithm, and then in the inner loop, we attempt to
6035   // insert each A into B's group if:
6036   //
6037   //  1. A and B have the same stride,
6038   //  2. A and B have the same memory object size, and
6039   //  3. A belongs in B's group according to its distance from B.
6040   //
6041   // Special care is taken to ensure group formation will not break any
6042   // dependences.
6043   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
6044        BI != E; ++BI) {
6045     Instruction *B = BI->first;
6046     StrideDescriptor DesB = BI->second;
6047 
6048     // Initialize a group for B if it has an allowable stride. Even if we don't
6049     // create a group for B, we continue with the bottom-up algorithm to ensure
6050     // we don't break any of B's dependences.
6051     InterleaveGroup *Group = nullptr;
6052     if (isStrided(DesB.Stride)) {
6053       Group = getInterleaveGroup(B);
6054       if (!Group) {
6055         DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
6056         Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
6057       }
6058       if (B->mayWriteToMemory())
6059         StoreGroups.insert(Group);
6060       else
6061         LoadGroups.insert(Group);
6062     }
6063 
6064     for (auto AI = std::next(BI); AI != E; ++AI) {
6065       Instruction *A = AI->first;
6066       StrideDescriptor DesA = AI->second;
6067 
6068       // Our code motion strategy implies that we can't have dependences
6069       // between accesses in an interleaved group and other accesses located
6070       // between the first and last member of the group. Note that this also
6071       // means that a group can't have more than one member at a given offset.
6072       // The accesses in a group can have dependences with other accesses, but
6073       // we must ensure we don't extend the boundaries of the group such that
6074       // we encompass those dependent accesses.
6075       //
6076       // For example, assume we have the sequence of accesses shown below in a
6077       // stride-2 loop:
6078       //
6079       //  (1, 2) is a group | A[i]   = a;  // (1)
6080       //                    | A[i-1] = b;  // (2) |
6081       //                      A[i-3] = c;  // (3)
6082       //                      A[i]   = d;  // (4) | (2, 4) is not a group
6083       //
6084       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
6085       // but not with (4). If we did, the dependent access (3) would be within
6086       // the boundaries of the (2, 4) group.
6087       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
6088         // If a dependence exists and A is already in a group, we know that A
6089         // must be a store since A precedes B and WAR dependences are allowed.
6090         // Thus, A would be sunk below B. We release A's group to prevent this
6091         // illegal code motion. A will then be free to form another group with
6092         // instructions that precede it.
6093         if (isInterleaved(A)) {
6094           InterleaveGroup *StoreGroup = getInterleaveGroup(A);
6095           StoreGroups.remove(StoreGroup);
6096           releaseGroup(StoreGroup);
6097         }
6098 
6099         // If a dependence exists and A is not already in a group (or it was
6100         // and we just released it), B might be hoisted above A (if B is a
6101         // load) or another store might be sunk below A (if B is a store). In
6102         // either case, we can't add additional instructions to B's group. B
6103         // will only form a group with instructions that it precedes.
6104         break;
6105       }
6106 
6107       // At this point, we've checked for illegal code motion. If either A or B
6108       // isn't strided, there's nothing left to do.
6109       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
6110         continue;
6111 
6112       // Ignore A if it's already in a group or isn't the same kind of memory
6113       // operation as B.
6114       if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
6115         continue;
6116 
6117       // Check rules 1 and 2. Ignore A if its stride or size is different from
6118       // that of B.
6119       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
6120         continue;
6121 
      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
6124       if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
6125         continue;
6126 
6127       // Calculate the distance from A to B.
6128       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
6129           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
6130       if (!DistToB)
6131         continue;
6132       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
6133 
6134       // Check rule 3. Ignore A if its distance to B is not a multiple of the
6135       // size.
6136       if (DistanceToB % static_cast<int64_t>(DesB.Size))
6137         continue;
6138 
6139       // Ignore A if either A or B is in a predicated block. Although we
6140       // currently prevent group formation for predicated accesses, we may be
6141       // able to relax this limitation in the future once we handle more
6142       // complicated blocks.
6143       if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
6144         continue;
6145 
6146       // The index of A is the index of B plus A's distance to B in multiples
6147       // of the size.
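      // For example (illustrative), if B has index 0, DesB.Size is 4 bytes,
      // and A accesses 8 bytes before B, then DistanceToB is -8 and A gets
      // index -2.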
6148       int IndexA =
6149           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
6150 
6151       // Try to insert A into B's group.
6152       if (Group->insertMember(A, IndexA, DesA.Align)) {
6153         DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
6154                      << "    into the interleave group with" << *B << '\n');
6155         InterleaveGroupMap[A] = Group;
6156 
6157         // Set the first load in program order as the insert position.
6158         if (A->mayReadFromMemory())
6159           Group->setInsertPos(A);
6160       }
6161     } // Iteration over A accesses.
6162   } // Iteration over B accesses.
6163 
6164   // Remove interleaved store groups with gaps.
6165   for (InterleaveGroup *Group : StoreGroups)
6166     if (Group->getNumMembers() != Group->getFactor()) {
6167       DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
6168                       "to gaps.\n");
6169       releaseGroup(Group);
6170     }
6171   // Remove interleaved groups with gaps (currently only loads) whose memory
6172   // accesses may wrap around. We have to revisit the getPtrStride analysis,
6173   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
6174   // not check wrapping (see documentation there).
6175   // FORNOW we use Assume=false;
6176   // TODO: Change to Assume=true but making sure we don't exceed the threshold
6177   // of runtime SCEV assumptions checks (thereby potentially failing to
6178   // vectorize altogether).
6179   // Additional optional optimizations:
6180   // TODO: If we are peeling the loop and we know that the first pointer doesn't
6181   // wrap then we can deduce that all pointers in the group don't wrap.
6182   // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true, we'll
  // only need at most one runtime check per interleaved group.
6185   for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
6189     if (Group->getNumMembers() == Group->getFactor())
6190       continue;
6191 
    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
6197     Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
6198     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
6199                       /*ShouldCheckWrap=*/true)) {
6200       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6201                       "first group member potentially pointer-wrapping.\n");
6202       releaseGroup(Group);
6203       continue;
6204     }
6205     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
6206     if (LastMember) {
6207       Value *LastMemberPtr = getPointerOperand(LastMember);
6208       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
6209                         /*ShouldCheckWrap=*/true)) {
6210         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6211                         "last group member potentially pointer-wrapping.\n");
6212         releaseGroup(Group);
6213       }
6214     } else {
6215       // Case 3: A non-reversed interleaved load group with gaps: We need
6216       // to execute at least one scalar epilogue iteration. This will ensure
6217       // we don't speculatively access memory out-of-bounds. We only need
6218       // to look for a member at index factor - 1, since every group must have
6219       // a member at index zero.
6220       if (Group->isReverse()) {
6221         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6222                         "a reverse access with gaps.\n");
6223         releaseGroup(Group);
6224         continue;
6225       }
6226       DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
6227       RequiresScalarEpilogue = true;
6228     }
6229   }
6230 }
6231 
6232 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
6233   if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
6234     ORE->emit(createMissedAnalysis("ConditionalStore")
6235               << "store that is conditionally executed prevents vectorization");
6236     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
6237     return None;
6238   }
6239 
6240   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the branch on the
    // runtime check is still likely to be dynamically uniform if the target
    // can skip it.
    DEBUG(dbgs()
          << "LV: Not inserting runtime ptr check for divergent target\n");
6244 
6245     ORE->emit(
6246       createMissedAnalysis("CantVersionLoopWithDivergentTarget")
6247       << "runtime pointer checks needed. Not enabled for divergent target");
6248 
6249     return None;
6250   }
6251 
6252   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  // All remaining checks deal with the scalar loop when optimizing for size.
  if (!OptForSize)
    return computeFeasibleMaxVF(OptForSize, TC);
6255 
6256   if (Legal->getRuntimePointerChecking()->Need) {
6257     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
6258               << "runtime pointer checks needed. Enable vectorization of this "
6259                  "loop with '#pragma clang loop vectorize(enable)' when "
6260                  "compiling with -Os/-Oz");
6261     DEBUG(dbgs()
6262           << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
6263     return None;
6264   }
6265 
6266   // If we optimize the program for size, avoid creating the tail loop.
6267   DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
6268 
  // If we don't know the precise trip count, or if it is 1, don't vectorize.
6270   if (TC < 2) {
6271     ORE->emit(
6272         createMissedAnalysis("UnknownLoopCountComplexCFG")
6273         << "unable to calculate the loop count due to complex control flow");
6274     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6275     return None;
6276   }
6277 
6278   unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);
6279 
6280   if (TC % MaxVF != 0) {
6281     // If the trip count that we found modulo the vectorization factor is not
6282     // zero then we require a tail.
6283     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
6284     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
6285     //        smaller MaxVF that does not require a scalar epilog.
6286 
6287     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
6288               << "cannot optimize for size and vectorize at the "
6289                  "same time. Enable vectorization of this loop "
6290                  "with '#pragma clang loop vectorize(enable)' "
6291                  "when compiling with -Os/-Oz");
6292     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6293     return None;
6294   }
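  // E.g. (illustrative): TC == 16 with MaxVF == 8 divides evenly and is
  // allowed; TC == 17 would require a scalar tail iteration and is rejected
  // above.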
6295 
6296   return MaxVF;
6297 }
6298 
6299 unsigned
6300 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
6301                                                  unsigned ConstTripCount) {
6302   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
6303   unsigned SmallestType, WidestType;
6304   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
6305   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
6306 
  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
6311   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
6312 
6313   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
6314 
6315   unsigned MaxVectorSize = WidestRegister / WidestType;
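  // E.g. (illustrative): a 256-bit widest safe register and a widest type of
  // 32 bits give MaxVectorSize == 256 / 32 == 8 lanes.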
6316 
6317   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
6318                << WidestType << " bits.\n");
6319   DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
6320                << " bits.\n");
6321 
6322   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
6323                                 " into one vector!");
6324   if (MaxVectorSize == 0) {
6325     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
6326     MaxVectorSize = 1;
6327     return MaxVectorSize;
6328   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
6329              isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to the constant trip count. There is no point
    // in choosing a larger VF, as the MaximizeBandwidth loop below would.
6332     DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
6333                  << ConstTripCount << "\n");
6334     MaxVectorSize = ConstTripCount;
6335     return MaxVectorSize;
6336   }
6337 
6338   unsigned MaxVF = MaxVectorSize;
6339   if (MaximizeBandwidth && !OptForSize) {
6340     // Collect all viable vectorization factors larger than the default MaxVF
6341     // (i.e. MaxVectorSize).
6342     SmallVector<unsigned, 8> VFs;
6343     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
6344     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
6345       VFs.push_back(VS);
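    // E.g. (illustrative): WidestRegister == 256 and SmallestType == 8 give
    // NewMaxVectorSize == 32, so with MaxVectorSize == 8 the candidate VFs
    // collected here are {16, 32}.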
6346 
6347     // For each VF calculate its register usage.
6348     auto RUs = calculateRegisterUsage(VFs);
6349 
    // Select the largest VF which doesn't require more registers than the
    // target provides.
6352     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
6353     for (int i = RUs.size() - 1; i >= 0; --i) {
6354       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
6355         MaxVF = VFs[i];
6356         break;
6357       }
6358     }
6359   }
6360   return MaxVF;
6361 }
6362 
6363 LoopVectorizationCostModel::VectorizationFactor
6364 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
6365   float Cost = expectedCost(1).first;
6366 #ifndef NDEBUG
6367   const float ScalarCost = Cost;
6368 #endif /* NDEBUG */
6369   unsigned Width = 1;
6370   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
6371 
6372   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6373   // Ignore scalar width, because the user explicitly wants vectorization.
6374   if (ForceVectorization && MaxVF > 1) {
6375     Width = 2;
6376     Cost = expectedCost(Width).first / (float)Width;
6377   }
6378 
6379   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop executes fewer times, so we divide the
    // cost of the vector loop by the vectorization factor (the number of
    // lanes processed per iteration).
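    // E.g. (illustrative): a scalar cost of 8 and expectedCost(4).first == 20
    // give a per-lane cost of 20 / 4 == 5, so VF == 4 beats the scalar loop.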
6383     VectorizationCostTy C = expectedCost(i);
6384     float VectorCost = C.first / (float)i;
6385     DEBUG(dbgs() << "LV: Vector loop of width " << i
6386                  << " costs: " << (int)VectorCost << ".\n");
6387     if (!C.second && !ForceVectorization) {
6388       DEBUG(
6389           dbgs() << "LV: Not considering vector loop of width " << i
6390                  << " because it will not generate any vector instructions.\n");
6391       continue;
6392     }
6393     if (VectorCost < Cost) {
6394       Cost = VectorCost;
6395       Width = i;
6396     }
6397   }
6398 
6399   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
6400         << "LV: Vectorization seems to be not beneficial, "
6401         << "but was forced by a user.\n");
6402   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
6403   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
6404   return Factor;
6405 }
6406 
6407 std::pair<unsigned, unsigned>
6408 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6409   unsigned MinWidth = -1U;
6410   unsigned MaxWidth = 8;
6411   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
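  // E.g. (illustrative): a loop that stores i8 values and loads i64 values
  // yields {8, 64}. MaxWidth starts at 8, so the result is never narrower
  // than a byte.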
6412 
6413   // For each block.
6414   for (BasicBlock *BB : TheLoop->blocks()) {
6415     // For each instruction in the loop.
6416     for (Instruction &I : *BB) {
6417       Type *T = I.getType();
6418 
6419       // Skip ignored values.
6420       if (ValuesToIgnore.count(&I))
6421         continue;
6422 
6423       // Only examine Loads, Stores and PHINodes.
6424       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6425         continue;
6426 
6427       // Examine PHI nodes that are reduction variables. Update the type to
6428       // account for the recurrence type.
6429       if (auto *PN = dyn_cast<PHINode>(&I)) {
6430         if (!Legal->isReductionVariable(PN))
6431           continue;
6432         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
6433         T = RdxDesc.getRecurrenceType();
6434       }
6435 
6436       // Examine the stored values.
6437       if (auto *ST = dyn_cast<StoreInst>(&I))
6438         T = ST->getValueOperand()->getType();
6439 
6440       // Ignore loaded pointer types and stored pointer types that are not
6441       // vectorizable.
6442       //
6443       // FIXME: The check here attempts to predict whether a load or store will
6444       //        be vectorized. We only know this for certain after a VF has
6445       //        been selected. Here, we assume that if an access can be
6446       //        vectorized, it will be. We should also look at extending this
6447       //        optimization to non-pointer types.
6448       //
6449       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6450           !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
6451         continue;
6452 
6453       MinWidth = std::min(MinWidth,
6454                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6455       MaxWidth = std::max(MaxWidth,
6456                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6457     }
6458   }
6459 
6460   return {MinWidth, MaxWidth};
6461 }
6462 
6463 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
6464                                                            unsigned VF,
6465                                                            unsigned LoopCost) {
6466   // -- The interleave heuristics --
6467   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6468   // There are many micro-architectural considerations that we can't predict
6469   // at this level. For example, frontend pressure (on decode or fetch) due to
6470   // code size, or the number and capabilities of the execution ports.
6471   //
6472   // We use the following heuristics to select the interleave count:
6473   // 1. If the code has reductions, then we interleave to break the cross
6474   // iteration dependency.
6475   // 2. If the loop is really small, then we interleave to reduce the loop
6476   // overhead.
6477   // 3. We don't interleave if we think that we will spill registers to memory
6478   // due to the increased register pressure.
6479 
6480   // When we optimize for size, we don't interleave.
6481   if (OptForSize)
6482     return 1;
6483 
  // The max safe dependence distance was already used to cap the
  // vectorization factor; interleaving would effectively widen the accessed
  // range past it, so don't interleave.
6485   if (Legal->getMaxSafeDepDistBytes() != -1U)
6486     return 1;
6487 
6488   // Do not interleave loops with a relatively small trip count.
6489   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6490   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
6491     return 1;
6492 
6493   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
6494   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6495                << " registers\n");
6496 
6497   if (VF == 1) {
6498     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6499       TargetNumRegisters = ForceTargetNumScalarRegs;
6500   } else {
6501     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6502       TargetNumRegisters = ForceTargetNumVectorRegs;
6503   }
6504 
6505   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so clamp them to at least one (i.e.
  // assume at least one instruction that uses at least one register) to
  // avoid dividing by zero.
6508   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
6509   R.NumInstructions = std::max(R.NumInstructions, 1U);
6510 
6511   // We calculate the interleave count using the following formula.
6512   // Subtract the number of loop invariants from the number of available
6513   // registers. These registers are used by all of the interleaved instances.
6514   // Next, divide the remaining registers by the number of registers that is
6515   // required by the loop, in order to estimate how many parallel instances
6516   // fit without causing spills. All of this is rounded down if necessary to be
6517   // a power of two. We want power of two interleave count to simplify any
6518   // addressing operations or alignment considerations.
6519   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
6520                               R.MaxLocalUsers);
6521 
6522   // Don't count the induction variable as interleaved.
6523   if (EnableIndVarRegisterHeur)
6524     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
6525                        std::max(1U, (R.MaxLocalUsers - 1)));
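  // E.g. (illustrative): TargetNumRegisters == 16, LoopInvariantRegs == 2 and
  // MaxLocalUsers == 3 give PowerOf2Floor((16 - 2) / 3) == PowerOf2Floor(4)
  // == 4; with the induction-variable adjustment above it is
  // PowerOf2Floor((16 - 2 - 1) / 2) == PowerOf2Floor(6) == 4.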
6526 
6527   // Clamp the interleave ranges to reasonable counts.
6528   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
6529 
6530   // Check if the user has overridden the max.
6531   if (VF == 1) {
6532     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6533       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6534   } else {
6535     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6536       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6537   }
6538 
6539   // If we did not calculate the cost for VF (because the user selected the VF)
6540   // then we calculate the cost of VF here.
6541   if (LoopCost == 0)
6542     LoopCost = expectedCost(VF).first;
6543 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
6546   if (IC > MaxInterleaveCount)
6547     IC = MaxInterleaveCount;
6548   else if (IC < 1)
6549     IC = 1;
6550 
6551   // Interleave if we vectorized this loop and there is a reduction that could
6552   // benefit from interleaving.
6553   if (VF > 1 && !Legal->getReductionVars()->empty()) {
6554     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6555     return IC;
6556   }
6557 
6558   // Note that if we've already vectorized the loop we will have done the
6559   // runtime check and so interleaving won't require further checks.
6560   bool InterleavingRequiresRuntimePointerCheck =
6561       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
6562 
6563   // We want to interleave small loops in order to reduce the loop overhead and
6564   // potentially expose ILP opportunities.
6565   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6566   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead cost is 1. Using the cost model's
    // estimate of the loop cost, we interleave until the loop overhead is
    // about 5% of the total cost of the loop.
6570     unsigned SmallIC =
6571         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
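    // E.g. (illustrative): SmallLoopCost == 20 and LoopCost == 4 give
    // PowerOf2Floor(20 / 4) == 4, so SmallIC == min(IC, 4).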
6572 
6573     // Interleave until store/load ports (estimated by max interleave count) are
6574     // saturated.
6575     unsigned NumStores = Legal->getNumStores();
6576     unsigned NumLoads = Legal->getNumLoads();
6577     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6578     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
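    // E.g. (illustrative): IC == 8 with two stores gives StoresIC == 4, while
    // a single load keeps LoadsIC == 8.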
6579 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit this, by default, to 2
    // so the critical path only gets increased by one reduction operation.
6584     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
6585       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6586       SmallIC = std::min(SmallIC, F);
6587       StoresIC = std::min(StoresIC, F);
6588       LoadsIC = std::min(LoadsIC, F);
6589     }
6590 
6591     if (EnableLoadStoreRuntimeInterleave &&
6592         std::max(StoresIC, LoadsIC) > SmallIC) {
6593       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6594       return std::max(StoresIC, LoadsIC);
6595     }
6596 
6597     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6598     return SmallIC;
6599   }
6600 
6601   // Interleave if this is a large loop (small loops are already dealt with by
6602   // this point) that could benefit from interleaving.
6603   bool HasReductions = !Legal->getReductionVars()->empty();
6604   if (TTI.enableAggressiveInterleaving(HasReductions)) {
6605     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6606     return IC;
6607   }
6608 
6609   DEBUG(dbgs() << "LV: Not Interleaving.\n");
6610   return 1;
6611 }
6612 
6613 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6614 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
  // met before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take more registers.
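  // Illustrative example: for instructions %a, %b, %c in program order where
  // %c uses both %a and %b, the intervals of %a and %b both end at %c's
  // index. Just before %c is processed both intervals are open, so the max
  // usage observed at that point is 2.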
6632   LoopBlocksDFS DFS(TheLoop);
6633   DFS.perform(LI);
6634 
6635   RegisterUsage RU;
6636   RU.NumInstructions = 0;
6637 
6638   // Each 'key' in the map opens a new interval. The values
6639   // of the map are the index of the 'last seen' usage of the
6640   // instruction that is the key.
6641   using IntervalMap = DenseMap<Instruction *, unsigned>;
6642 
  // Maps each index to its instruction.
6644   DenseMap<unsigned, Instruction *> IdxToInstr;
6645   // Marks the end of each interval.
6646   IntervalMap EndPoint;
  // Saves the set of instructions that have in-loop users.
6648   SmallSet<Instruction *, 8> Ends;
6649   // Saves the list of values that are used in the loop but are
6650   // defined outside the loop, such as arguments and constants.
6651   SmallPtrSet<Value *, 8> LoopInvariants;
6652 
6653   unsigned Index = 0;
6654   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6655     RU.NumInstructions += BB->size();
6656     for (Instruction &I : *BB) {
6657       IdxToInstr[Index++] = &I;
6658 
6659       // Save the end location of each USE.
6660       for (Value *U : I.operands()) {
6661         auto *Instr = dyn_cast<Instruction>(U);
6662 
6663         // Ignore non-instruction values such as arguments, constants, etc.
6664         if (!Instr)
6665           continue;
6666 
6667         // If this instruction is outside the loop then record it and continue.
6668         if (!TheLoop->contains(Instr)) {
6669           LoopInvariants.insert(Instr);
6670           continue;
6671         }
6672 
6673         // Overwrite previous end points.
6674         EndPoint[Instr] = Index;
6675         Ends.insert(Instr);
6676       }
6677     }
6678   }
6679 
6680   // Saves the list of intervals that end with the index in 'key'.
6681   using InstrList = SmallVector<Instruction *, 2>;
6682   DenseMap<unsigned, InstrList> TransposeEnds;
6683 
6684   // Transpose the EndPoints to a list of values that end at each index.
6685   for (auto &Interval : EndPoint)
6686     TransposeEnds[Interval.second].push_back(Interval.first);
6687 
6688   SmallSet<Instruction *, 8> OpenIntervals;
6689 
6690   // Get the size of the widest register.
6691   unsigned MaxSafeDepDist = -1U;
6692   if (Legal->getMaxSafeDepDistBytes() != -1U)
6693     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
6694   unsigned WidestRegister =
6695       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
6696   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6697 
6698   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6699   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
6700 
6701   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6702 
6703   // A lambda that gets the register usage for the given type and VF.
6704   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
6705     if (Ty->isTokenTy())
6706       return 0U;
6707     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
6708     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
6709   };
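  // E.g. (illustrative): VF == 8 with 32-bit elements on a 128-bit register
  // needs max(1, 8 * 32 / 128) == 2 registers.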
6710 
  for (unsigned i = 0; i < Index; ++i) {
6712     Instruction *I = IdxToInstr[i];
6713 
6714     // Remove all of the instructions that end at this location.
6715     InstrList &List = TransposeEnds[i];
6716     for (Instruction *ToRemove : List)
6717       OpenIntervals.erase(ToRemove);
6718 
6719     // Ignore instructions that are never used within the loop.
6720     if (!Ends.count(I))
6721       continue;
6722 
6723     // Skip ignored values.
6724     if (ValuesToIgnore.count(I))
6725       continue;
6726 
6727     // For each VF find the maximum usage of registers.
6728     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6729       if (VFs[j] == 1) {
6730         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
6731         continue;
6732       }
6733       collectUniformsAndScalars(VFs[j]);
6734       // Count the number of live intervals.
6735       unsigned RegUsage = 0;
6736       for (auto Inst : OpenIntervals) {
6737         // Skip ignored values for VF > 1.
6738         if (VecValuesToIgnore.count(Inst) ||
6739             isScalarAfterVectorization(Inst, VFs[j]))
6740           continue;
6741         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
6742       }
6743       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
6744     }
6745 
6746     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6747                  << OpenIntervals.size() << '\n');
6748 
6749     // Add the current instruction to the list of open intervals.
6750     OpenIntervals.insert(I);
6751   }
6752 
6753   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6754     unsigned Invariant = 0;
6755     if (VFs[i] == 1)
6756       Invariant = LoopInvariants.size();
6757     else {
6758       for (auto Inst : LoopInvariants)
6759         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
6760     }
6761 
6762     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
6763     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
6764     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
6765     DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
6766 
6767     RU.LoopInvariantRegs = Invariant;
6768     RU.MaxLocalUsers = MaxUsages[i];
6769     RUs[i] = RU;
6770   }
6771 
6772   return RUs;
6773 }
6774 
6775 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
6776   // If we aren't vectorizing the loop, or if we've already collected the
6777   // instructions to scalarize, there's nothing to do. Collection may already
6778   // have occurred if we have a user-selected VF and are now computing the
6779   // expected cost for interleaving.
6780   if (VF < 2 || InstsToScalarize.count(VF))
6781     return;
6782 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6784   // not profitable to scalarize any instructions, the presence of VF in the
6785   // map will indicate that we've analyzed it already.
6786   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6787 
6788   // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better not to if-convert the blocks they are in.
6790   // If so, we also record the instructions to scalarize.
6791   for (BasicBlock *BB : TheLoop->blocks()) {
6792     if (!Legal->blockNeedsPredication(BB))
6793       continue;
6794     for (Instruction &I : *BB)
6795       if (Legal->isScalarWithPredication(&I)) {
6796         ScalarCostsTy ScalarCosts;
6797         if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6798           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6799 
6800         // Remember that BB will remain after vectorization.
6801         PredicatedBBsAfterVectorization.insert(BB);
6802       }
6803   }
6804 }
6805 
6806 int LoopVectorizationCostModel::computePredInstDiscount(
6807     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
6808     unsigned VF) {
6809   assert(!isUniformAfterVectorization(PredInst, VF) &&
6810          "Instruction marked uniform-after-vectorization will be predicated");
6811 
6812   // Initialize the discount to zero, meaning that the scalar version and the
6813   // vector version cost the same.
6814   int Discount = 0;
6815 
6816   // Holds instructions to analyze. The instructions we visit are mapped in
6817   // ScalarCosts. Those instructions are the ones that would be scalarized if
6818   // we find that the scalar version costs less.
6819   SmallVector<Instruction *, 8> Worklist;
6820 
6821   // Returns true if the given instruction can be scalarized.
6822   auto canBeScalarized = [&](Instruction *I) -> bool {
6823     // We only attempt to scalarize instructions forming a single-use chain
6824     // from the original predicated block that would otherwise be vectorized.
6825     // Although not strictly necessary, we give up on instructions we know will
6826     // already be scalar to avoid traversing chains that are unlikely to be
6827     // beneficial.
6828     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6829         isScalarAfterVectorization(I, VF))
6830       return false;
6831 
6832     // If the instruction is scalar with predication, it will be analyzed
6833     // separately. We ignore it within the context of PredInst.
6834     if (Legal->isScalarWithPredication(I))
6835       return false;
6836 
6837     // If any of the instruction's operands are uniform after vectorization,
6838     // the instruction cannot be scalarized. This prevents, for example, a
6839     // masked load from being scalarized.
6840     //
6841     // We assume we will only emit a value for lane zero of an instruction
6842     // marked uniform after vectorization, rather than VF identical values.
6843     // Thus, if we scalarize an instruction that uses a uniform, we would
6844     // create uses of values corresponding to the lanes we aren't emitting code
6845     // for. This behavior can be changed by allowing getScalarValue to clone
6846     // the lane zero values for uniforms rather than asserting.
6847     for (Use &U : I->operands())
6848       if (auto *J = dyn_cast<Instruction>(U.get()))
6849         if (isUniformAfterVectorization(J, VF))
6850           return false;
6851 
6852     // Otherwise, we can scalarize the instruction.
6853     return true;
6854   };
6855 
6856   // Returns true if an operand that cannot be scalarized must be extracted
6857   // from a vector. We will account for this scalarization overhead below. Note
6858   // that the non-void predicated instructions are placed in their own blocks,
6859   // and their return values are inserted into vectors. Thus, an extract would
6860   // still be required.
6861   auto needsExtract = [&](Instruction *I) -> bool {
6862     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
6863   };
6864 
6865   // Compute the expected cost discount from scalarizing the entire expression
6866   // feeding the predicated instruction. We currently only consider expressions
6867   // that are single-use instruction chains.
6868   Worklist.push_back(PredInst);
6869   while (!Worklist.empty()) {
6870     Instruction *I = Worklist.pop_back_val();
6871 
6872     // If we've already analyzed the instruction, there's nothing to do.
6873     if (ScalarCosts.count(I))
6874       continue;
6875 
6876     // Compute the cost of the vector instruction. Note that this cost already
6877     // includes the scalarization overhead of the predicated instruction.
6878     unsigned VectorCost = getInstructionCost(I, VF).first;
6879 
6880     // Compute the cost of the scalarized instruction. This cost is the cost of
6881     // the instruction as if it wasn't if-converted and instead remained in the
6882     // predicated block. We will scale this cost by block probability after
6883     // computing the scalarization overhead.
6884     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
6885 
6886     // Compute the scalarization overhead of needed insertelement instructions
6887     // and phi nodes.
6888     if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6889       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
6890                                                  true, false);
6891       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
6892     }
6893 
6894     // Compute the scalarization overhead of needed extractelement
6895     // instructions. For each of the instruction's operands, if the operand can
6896     // be scalarized, add it to the worklist; otherwise, account for the
6897     // overhead.
6898     for (Use &U : I->operands())
6899       if (auto *J = dyn_cast<Instruction>(U.get())) {
6900         assert(VectorType::isValidElementType(J->getType()) &&
6901                "Instruction has non-scalar type");
6902         if (canBeScalarized(J))
6903           Worklist.push_back(J);
        else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
6907       }
6908 
6909     // Scale the total scalar cost by block probability.
6910     ScalarCost /= getReciprocalPredBlockProb();
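    // E.g. (illustrative): assuming the predicated block executes half the
    // time, getReciprocalPredBlockProb() is 2 and a raw scalar cost of 10
    // becomes 5.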
6911 
    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs at least as much, so scalarizing is
    // beneficial.
6914     Discount += VectorCost - ScalarCost;
6915     ScalarCosts[I] = ScalarCost;
6916   }
6917 
6918   return Discount;
6919 }
6920 
6921 LoopVectorizationCostModel::VectorizationCostTy
6922 LoopVectorizationCostModel::expectedCost(unsigned VF) {
6923   VectorizationCostTy Cost;
6924 
6925   // For each block.
6926   for (BasicBlock *BB : TheLoop->blocks()) {
6927     VectorizationCostTy BlockCost;
6928 
6929     // For each instruction in the old loop.
6930     for (Instruction &I : *BB) {
6931       // Skip dbg intrinsics.
6932       if (isa<DbgInfoIntrinsic>(I))
6933         continue;
6934 
6935       // Skip ignored values.
6936       if (ValuesToIgnore.count(&I) ||
6937           (VF > 1 && VecValuesToIgnore.count(&I)))
6938         continue;
6939 
6940       VectorizationCostTy C = getInstructionCost(&I, VF);
6941 
6942       // Check if we should override the cost.
6943       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6944         C.first = ForceTargetInstructionCost;
6945 
6946       BlockCost.first += C.first;
6947       BlockCost.second |= C.second;
6948       DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
6949                    << VF << " For instruction: " << I << '\n');
6950     }
6951 
6952     // If we are vectorizing a predicated block, it will have been
6953     // if-converted. This means that the block's instructions (aside from
6954     // stores and instructions that may divide by zero) will now be
6955     // unconditionally executed. For the scalar case, we may not always execute
6956     // the predicated block. Thus, scale the block's cost by the probability of
6957     // executing it.
6958     if (VF == 1 && Legal->blockNeedsPredication(BB))
6959       BlockCost.first /= getReciprocalPredBlockProb();
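    // E.g. (illustrative): if the predicated block executes half the time, a
    // block cost of 8 contributes 4 to the scalar loop's cost.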
6960 
6961     Cost.first += BlockCost.first;
6962     Cost.second |= BlockCost.second;
6963   }
6964 
6965   return Cost;
6966 }
6967 
/// \brief Gets the address access SCEV after verifying that the access
/// pattern is loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6979   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6980   if (!Gep)
6981     return nullptr;
6982 
6983   // We are looking for a gep with all loop invariant indices except for one
6984   // which should be an induction variable.
6985   auto SE = PSE.getSE();
6986   unsigned NumOperands = Gep->getNumOperands();
6987   for (unsigned i = 1; i < NumOperands; ++i) {
6988     Value *Opd = Gep->getOperand(i);
6989     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6990         !Legal->isInductionVariable(Opd))
6991       return nullptr;
6992   }
6993 
6994   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6995   return PSE.getSCEV(Ptr);
6996 }
6997 
6998 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6999   return Legal->hasStride(I->getOperand(0)) ||
7000          Legal->hasStride(I->getOperand(1));
7001 }
7002 
7003 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
7004                                                                  unsigned VF) {
7005   Type *ValTy = getMemInstValueType(I);
7006   auto SE = PSE.getSE();
7007 
7008   unsigned Alignment = getMemInstAlignment(I);
7009   unsigned AS = getMemInstAddressSpace(I);
7010   Value *Ptr = getPointerOperand(I);
7011   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
7012 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
7015   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
7016 
7017   // Get the cost of the scalar memory instruction and address computation.
7018   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
7019 
7020   Cost += VF *
7021           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
7022                               AS, I);
7023 
7024   // Get the overhead of the extractelement and insertelement instructions
7025   // we might create due to scalarization.
7026   Cost += getScalarizationOverhead(I, VF, TTI);
7027 
7028   // If we have a predicated store, it may not be executed for each vector
7029   // lane. Scale the cost by the probability of executing the predicated
7030   // block.
7031   if (Legal->isScalarWithPredication(I))
7032     Cost /= getReciprocalPredBlockProb();
7033 
7034   return Cost;
7035 }
7036 
7037 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
7038                                                              unsigned VF) {
7039   Type *ValTy = getMemInstValueType(I);
7040   Type *VectorTy = ToVectorTy(ValTy, VF);
7041   unsigned Alignment = getMemInstAlignment(I);
7042   Value *Ptr = getPointerOperand(I);
7043   unsigned AS = getMemInstAddressSpace(I);
7044   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
7045 
7046   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7047          "Stride should be 1 or -1 for consecutive memory access");
7048   unsigned Cost = 0;
7049   if (Legal->isMaskRequired(I))
7050     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
7051   else
7052     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
7053 
7054   bool Reverse = ConsecutiveStride < 0;
7055   if (Reverse)
7056     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
7057   return Cost;
7058 }
7059 
7060 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7061                                                          unsigned VF) {
7062   LoadInst *LI = cast<LoadInst>(I);
7063   Type *ValTy = LI->getType();
7064   Type *VectorTy = ToVectorTy(ValTy, VF);
7065   unsigned Alignment = LI->getAlignment();
7066   unsigned AS = LI->getPointerAddressSpace();
7067 
7068   return TTI.getAddressComputationCost(ValTy) +
7069          TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
7070          TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7071 }
7072 
7073 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7074                                                           unsigned VF) {
7075   Type *ValTy = getMemInstValueType(I);
7076   Type *VectorTy = ToVectorTy(ValTy, VF);
7077   unsigned Alignment = getMemInstAlignment(I);
7078   Value *Ptr = getPointerOperand(I);
7079 
7080   return TTI.getAddressComputationCost(VectorTy) +
7081          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
7082                                     Legal->isMaskRequired(I), Alignment);
7083 }
7084 
7085 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7086                                                             unsigned VF) {
7087   Type *ValTy = getMemInstValueType(I);
7088   Type *VectorTy = ToVectorTy(ValTy, VF);
7089   unsigned AS = getMemInstAddressSpace(I);
7090 
7091   auto Group = Legal->getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
7093 
7094   unsigned InterleaveFactor = Group->getFactor();
7095   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7096 
7097   // Holds the indices of existing members in an interleaved load group.
7098   // An interleaved store group doesn't need this as it doesn't allow gaps.
7099   SmallVector<unsigned, 4> Indices;
7100   if (isa<LoadInst>(I)) {
7101     for (unsigned i = 0; i < InterleaveFactor; i++)
7102       if (Group->getMember(i))
7103         Indices.push_back(i);
7104   }
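  // E.g. (illustrative): a factor-3 load group with members at indices
  // {0, 2} and VF == 4 queries the cost of one wide <12 x ValTy> load with
  // Indices == {0, 2}.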
7105 
7106   // Calculate the cost of the whole interleaved group.
7107   unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
7108                                                  Group->getFactor(), Indices,
7109                                                  Group->getAlignment(), AS);
7110 
7111   if (Group->isReverse())
7112     Cost += Group->getNumMembers() *
7113             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
7114   return Cost;
7115 }
7116 
7117 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7118                                                               unsigned VF) {
  // Calculate the scalar cost only. The vectorization cost should be ready
  // at this point.
7121   if (VF == 1) {
7122     Type *ValTy = getMemInstValueType(I);
7123     unsigned Alignment = getMemInstAlignment(I);
7124     unsigned AS = getMemInstAddressSpace(I);
7125 
7126     return TTI.getAddressComputationCost(ValTy) +
7127            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
7128   }
7129   return getWideningCost(I, VF);
7130 }
7131 
7132 LoopVectorizationCostModel::VectorizationCostTy
7133 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
7134   // If we know that this instruction will remain uniform, check the cost of
7135   // the scalar version.
7136   if (isUniformAfterVectorization(I, VF))
7137     VF = 1;
7138 
7139   if (VF > 1 && isProfitableToScalarize(I, VF))
7140     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7141 
7142   // Forced scalars do not have any scalarization overhead.
7143   if (VF > 1 && ForcedScalars.count(VF) &&
7144       ForcedScalars.find(VF)->second.count(I))
7145     return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
7146 
7147   Type *VectorTy;
7148   unsigned C = getInstructionCost(I, VF, VectorTy);
7149 
7150   bool TypeNotScalarized =
7151       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
7152   return VectorizationCostTy(C, TypeNotScalarized);
7153 }
7154 
7155 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
7156   if (VF == 1)
7157     return;
7158   for (BasicBlock *BB : TheLoop->blocks()) {
7159     // For each instruction in the old loop.
7160     for (Instruction &I : *BB) {
7161       Value *Ptr = getPointerOperand(&I);
7162       if (!Ptr)
7163         continue;
7164 
7165       if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
7166         // Scalar load + broadcast
7167         unsigned Cost = getUniformMemOpCost(&I, VF);
7168         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7169         continue;
7170       }
7171 
7172       // We assume that widening is the best solution when possible.
7173       if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
7174         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
7175         int ConsecutiveStride = Legal->isConsecutivePtr(getPointerOperand(&I));
7176         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7177                "Expected consecutive stride.");
7178         InstWidening Decision =
7179             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7180         setWideningDecision(&I, VF, Decision, Cost);
7181         continue;
7182       }
7183 
7184       // Choose between Interleaving, Gather/Scatter or Scalarization.
7185       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
7186       unsigned NumAccesses = 1;
7187       if (Legal->isAccessInterleaved(&I)) {
7188         auto Group = Legal->getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7190 
7191         // Make one decision for the whole group.
7192         if (getWideningDecision(&I, VF) != CM_Unknown)
7193           continue;
7194 
7195         NumAccesses = Group->getNumMembers();
7196         InterleaveCost = getInterleaveGroupCost(&I, VF);
7197       }
7198 
7199       unsigned GatherScatterCost =
7200           Legal->isLegalGatherOrScatter(&I)
7201               ? getGatherScatterCost(&I, VF) * NumAccesses
7202               : std::numeric_limits<unsigned>::max();
7203 
7204       unsigned ScalarizationCost =
7205           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7206 
7207       // Choose better solution for the current VF,
7208       // write down this decision and use it during vectorization.
7209       unsigned Cost;
7210       InstWidening Decision;
7211       if (InterleaveCost <= GatherScatterCost &&
7212           InterleaveCost < ScalarizationCost) {
7213         Decision = CM_Interleave;
7214         Cost = InterleaveCost;
7215       } else if (GatherScatterCost < ScalarizationCost) {
7216         Decision = CM_GatherScatter;
7217         Cost = GatherScatterCost;
7218       } else {
7219         Decision = CM_Scalarize;
7220         Cost = ScalarizationCost;
7221       }
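      // E.g. (illustrative): InterleaveCost == 10, GatherScatterCost == 12
      // and ScalarizationCost == 16 select CM_Interleave with Cost == 10.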
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is recorded for the group as a
      // whole, but it will actually be assigned to one instruction.
7225       if (auto Group = Legal->getInterleavedAccessGroup(&I))
7226         setWideningDecision(Group, VF, Decision, Cost);
7227       else
7228         setWideningDecision(&I, VF, Decision, Cost);
7229     }
7230   }
7231 
  // Make sure that any load of an address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
7234   // inevitable extracts into address registers, and also has the benefit of
7235   // activating LSR more, since that pass can't optimize vectorized
7236   // addresses.
7237   if (TTI.prefersVectorizedAddressing())
7238     return;
7239 
7240   // Start with all scalar pointer uses.
7241   SmallPtrSet<Instruction *, 8> AddrDefs;
7242   for (BasicBlock *BB : TheLoop->blocks())
7243     for (Instruction &I : *BB) {
7244       Instruction *PtrDef =
7245         dyn_cast_or_null<Instruction>(getPointerOperand(&I));
7246       if (PtrDef && TheLoop->contains(PtrDef) &&
7247           getWideningDecision(&I, VF) != CM_GatherScatter)
7248         AddrDefs.insert(PtrDef);
7249     }
7250 
7251   // Add all instructions used to generate the addresses.
7252   SmallVector<Instruction *, 4> Worklist;
7253   for (auto *I : AddrDefs)
7254     Worklist.push_back(I);
7255   while (!Worklist.empty()) {
7256     Instruction *I = Worklist.pop_back_val();
7257     for (auto &Op : I->operands())
7258       if (auto *InstOp = dyn_cast<Instruction>(Op))
7259         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7260             AddrDefs.insert(InstOp).second)
7261           Worklist.push_back(InstOp);
7262   }
7263 
7264   for (auto *I : AddrDefs) {
7265     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this requires finding out whether the
      // loaded value is involved in an address computation, the decision is
      // instead changed here once we know this is the case.
7270       InstWidening Decision = getWideningDecision(I, VF);
7271       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of an address.
7273         setWideningDecision(I, VF, CM_Scalarize,
7274                             (VF * getMemoryInstructionCost(I, 1)));
7275       else if (auto Group = Legal->getInterleavedAccessGroup(I)) {
7276         // Scalarize an interleave group of address loads.
        for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
          if (Instruction *Member = Group->getMember(Idx))
7279             setWideningDecision(Member, VF, CM_Scalarize,
7280                                 (VF * getMemoryInstructionCost(Member, 1)));
7281         }
7282       }
7283     } else
7284       // Make sure I gets scalarized and a cost estimate without
7285       // scalarization overhead.
7286       ForcedScalars[VF].insert(I);
7287   }
7288 }
7289 
7290 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7291                                                         unsigned VF,
7292                                                         Type *&VectorTy) {
7293   Type *RetTy = I->getType();
7294   if (canTruncateToMinimalBitwidth(I, VF))
7295     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7296   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7297   auto SE = PSE.getSE();
7298 
7299   // TODO: We need to estimate the cost of intrinsic calls.
7300   switch (I->getOpcode()) {
7301   case Instruction::GetElementPtr:
7302     // We mark this instruction as zero-cost because the cost of GEPs in
7303     // vectorized code depends on whether the corresponding memory instruction
7304     // is scalarized or not. Therefore, we handle GEPs with the memory
7305     // instruction cost.
7306     return 0;
7307   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7311     bool ScalarPredicatedBB = false;
7312     BranchInst *BI = cast<BranchInst>(I);
7313     if (VF > 1 && BI->isConditional() &&
7314         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7315          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7316       ScalarPredicatedBB = true;
7317 
7318     if (ScalarPredicatedBB) {
7319       // Return cost for branches around scalarized and predicated blocks.
7320       Type *Vec_i1Ty =
7321           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7322       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
7323               (TTI.getCFInstrCost(Instruction::Br) * VF));
7324     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
7325       // The back-edge branch will remain, as will all scalar branches.
7326       return TTI.getCFInstrCost(Instruction::Br);
7327     else
7328       // This branch will be eliminated by if-conversion.
7329       return 0;
7330     // Note: We currently assume zero cost for an unconditional branch inside
7331     // a predicated block since it will become a fall-through, although we
7332     // may decide in the future to call TTI for all branches.
7333   }
7334   case Instruction::PHI: {
7335     auto *Phi = cast<PHINode>(I);
7336 
7337     // First-order recurrences are replaced by vector shuffles inside the loop.
7338     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
7339       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
7340                                 VectorTy, VF - 1, VectorTy);
7341 
7342     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7343     // converted into select instructions. We require N - 1 selects per phi
7344     // node, where N is the number of incoming values.
7345     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
7346       return (Phi->getNumIncomingValues() - 1) *
7347              TTI.getCmpSelInstrCost(
7348                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7349                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
7350 
7351     return TTI.getCFInstrCost(Instruction::PHI);
7352   }
7353   case Instruction::UDiv:
7354   case Instruction::SDiv:
7355   case Instruction::URem:
7356   case Instruction::SRem:
7357     // If we have a predicated instruction, it may not be executed for each
7358     // vector lane. Get the scalarization cost and scale this amount by the
7359     // probability of executing the predicated block. If the instruction is not
7360     // predicated, we fall through to the next case.
7361     if (VF > 1 && Legal->isScalarWithPredication(I)) {
7362       unsigned Cost = 0;
7363 
7364       // These instructions have a non-void type, so account for the phi nodes
7365       // that we will create. This cost is likely to be zero. The phi node
7366       // cost, if any, should be scaled by the block probability because it
7367       // models a copy at the end of each predicated block.
7368       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
7369 
7370       // The cost of the non-predicated instruction.
7371       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
7372 
7373       // The cost of insertelement and extractelement instructions needed for
7374       // scalarization.
7375       Cost += getScalarizationOverhead(I, VF, TTI);
7376 
7377       // Scale the cost by the probability of executing the predicated blocks.
7378       // This assumes the predicated block for each vector lane is equally
7379       // likely.
7380       return Cost / getReciprocalPredBlockProb();
7381     }
7382     LLVM_FALLTHROUGH;
7383   case Instruction::Add:
7384   case Instruction::FAdd:
7385   case Instruction::Sub:
7386   case Instruction::FSub:
7387   case Instruction::Mul:
7388   case Instruction::FMul:
7389   case Instruction::FDiv:
7390   case Instruction::FRem:
7391   case Instruction::Shl:
7392   case Instruction::LShr:
7393   case Instruction::AShr:
7394   case Instruction::And:
7395   case Instruction::Or:
7396   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7398     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7399       return 0;
7400     // Certain instructions can be cheaper to vectorize if they have a constant
7401     // second vector operand. One example of this are shifts on x86.
7402     TargetTransformInfo::OperandValueKind Op1VK =
7403         TargetTransformInfo::OK_AnyValue;
7404     TargetTransformInfo::OperandValueKind Op2VK =
7405         TargetTransformInfo::OK_AnyValue;
7406     TargetTransformInfo::OperandValueProperties Op1VP =
7407         TargetTransformInfo::OP_None;
7408     TargetTransformInfo::OperandValueProperties Op2VP =
7409         TargetTransformInfo::OP_None;
7410     Value *Op2 = I->getOperand(1);
7411 
    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7418     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
7419       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
7420       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
7421       if (SplatValue) {
7422         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
7423         if (CInt && CInt->getValue().isPowerOf2())
7424           Op2VP = TargetTransformInfo::OP_PowerOf2;
7425         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7426       }
7427     } else if (Legal->isUniform(Op2)) {
7428       Op2VK = TargetTransformInfo::OK_UniformValue;
7429     }
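    // E.g. (illustrative): a shift whose second operand is the splat constant
    // vector <8, 8, 8, 8> is classified as OK_UniformConstantValue with
    // OP_PowerOf2, which targets such as x86 can exploit for cheaper shifts.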
7430     SmallVector<const Value *, 4> Operands(I->operand_values());
7431     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
7432     return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
7433                                           Op2VK, Op1VP, Op2VP, Operands);
7434   }
7435   case Instruction::Select: {
7436     SelectInst *SI = cast<SelectInst>(I);
7437     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7438     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7439     Type *CondTy = SI->getCondition()->getType();
7440     if (!ScalarCond)
7441       CondTy = VectorType::get(CondTy, VF);
7442 
7443     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
7444   }
7445   case Instruction::ICmp:
7446   case Instruction::FCmp: {
7447     Type *ValTy = I->getOperand(0)->getType();
7448     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7449     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7450       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7451     VectorTy = ToVectorTy(ValTy, VF);
7452     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
7453   }
7454   case Instruction::Store:
7455   case Instruction::Load: {
7456     unsigned Width = VF;
7457     if (Width > 1) {
7458       InstWidening Decision = getWideningDecision(I, Width);
7459       assert(Decision != CM_Unknown &&
7460              "CM decision should be taken at this point");
7461       if (Decision == CM_Scalarize)
7462         Width = 1;
7463     }
7464     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7465     return getMemoryInstructionCost(I, VF);
7466   }
7467   case Instruction::ZExt:
7468   case Instruction::SExt:
7469   case Instruction::FPToUI:
7470   case Instruction::FPToSI:
7471   case Instruction::FPExt:
7472   case Instruction::PtrToInt:
7473   case Instruction::IntToPtr:
7474   case Instruction::SIToFP:
7475   case Instruction::UIToFP:
7476   case Instruction::Trunc:
7477   case Instruction::FPTrunc:
7478   case Instruction::BitCast: {
7479     // We optimize the truncation of induction variables having constant
7480     // integer steps. The cost of these truncations is the same as the scalar
7481     // operation.
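    // For example (illustrative), for an induction %iv with a constant step,
    //   %t = trunc i64 %iv to i32
    // is costed below using the scalar source and destination types (i64 and
    // i32) rather than their vector counterparts.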
7482     if (isOptimizableIVTruncate(I, VF)) {
7483       auto *Trunc = cast<TruncInst>(I);
7484       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7485                                   Trunc->getSrcTy(), Trunc);
7486     }
7487 
7488     Type *SrcScalarTy = I->getOperand(0)->getType();
7489     Type *SrcVecTy =
7490         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7491     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
7494       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7495       //
7496       // Calculate the modified src and dest types.
7497       Type *MinVecTy = VectorTy;
7498       if (I->getOpcode() == Instruction::Trunc) {
7499         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7500         VectorTy =
7501             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7502       } else if (I->getOpcode() == Instruction::ZExt ||
7503                  I->getOpcode() == Instruction::SExt) {
7504         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7505         VectorTy =
7506             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7507       }
7508     }
7509 
7510     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
7511     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
7512   }
7513   case Instruction::Call: {
7514     bool NeedToScalarize;
7515     CallInst *CI = cast<CallInst>(I);
7516     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
7517     if (getVectorIntrinsicIDForCall(CI, TLI))
7518       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
7519     return CallCost;
7520   }
7521   default:
7522     // The cost of executing VF copies of the scalar instruction. This opcode
7523     // is unknown. Assume that it is the same as 'mul'.
7524     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
7525            getScalarizationOverhead(I, VF, TTI);
7526   } // end of switch.
7527 }
7528 
7529 char LoopVectorize::ID = 0;
7530 
7531 static const char lv_name[] = "Loop Vectorization";
7532 
7533 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7534 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7535 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7536 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7537 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7538 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7539 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7540 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7541 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7542 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7543 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7544 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7545 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7546 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7547 
7548 namespace llvm {
7549 
7550 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
7551   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
7552 }
7553 
7554 } // end namespace llvm
7555 
7556 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7557   // Check if the pointer operand of a load or store instruction is
7558   // consecutive.
7559   if (auto *Ptr = getPointerOperand(Inst))
7560     return Legal->isConsecutivePtr(Ptr);
7561   return false;
7562 }
7563 
7564 void LoopVectorizationCostModel::collectValuesToIgnore() {
7565   // Ignore ephemeral values.
7566   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7567 
7568   // Ignore type-promoting instructions we identified during reduction
7569   // detection.
7570   for (auto &Reduction : *Legal->getReductionVars()) {
7571     RecurrenceDescriptor &RedDes = Reduction.second;
7572     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7573     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7574   }
7575   // Ignore type-casting instructions we identified during induction
7576   // detection.
7577   for (auto &Induction : *Legal->getInductionVars()) {
7578     InductionDescriptor &IndDes = Induction.second;
7579     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7580     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7581   }
7582 }
7583 
7584 LoopVectorizationCostModel::VectorizationFactor
7585 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
  // Width 1 means no vectorization; cost 0 means the cost is uncomputed.
7587   const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U,
7588                                                                            0U};
7589   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
7590   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
7591     return NoVectorization;
7592 
7593   if (UserVF) {
7594     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7595     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
7596     // Collect the instructions (and their associated costs) that will be more
7597     // profitable to scalarize.
7598     CM.selectUserVectorizationFactor(UserVF);
7599     buildVPlans(UserVF, UserVF);
7600     DEBUG(printPlans(dbgs()));
7601     return {UserVF, 0};
7602   }
7603 
7604   unsigned MaxVF = MaybeMaxVF.getValue();
7605   assert(MaxVF != 0 && "MaxVF is zero.");
7606 
7607   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
7608     // Collect Uniform and Scalar instructions after vectorization with VF.
7609     CM.collectUniformsAndScalars(VF);
7610 
7611     // Collect the instructions (and their associated costs) that will be more
7612     // profitable to scalarize.
7613     if (VF > 1)
7614       CM.collectInstsToScalarize(VF);
7615   }
7616 
7617   buildVPlans(1, MaxVF);
7618   DEBUG(printPlans(dbgs()));
7619   if (MaxVF == 1)
7620     return NoVectorization;
7621 
7622   // Select the optimal vectorization factor.
7623   return CM.selectVectorizationFactor(MaxVF);
7624 }
7625 
7626 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
7627   DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
7628   BestVF = VF;
7629   BestUF = UF;
7630 
7631   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7632     return !Plan->hasVF(VF);
7633   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7635 }
7636 
7637 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7638                                            DominatorTree *DT) {
7639   // Perform the actual loop transformation.
7640 
7641   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7642   VPCallbackILV CallbackILV(ILV);
7643 
7644   VPTransformState State{BestVF, BestUF,      LI,
7645                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
7646                          &ILV,   CallbackILV};
7647   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7648 
7649   //===------------------------------------------------===//
7650   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
7654   //
7655   //===------------------------------------------------===//
7656 
7657   // 2. Copy and widen instructions from the old loop into the new loop.
7658   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7659   VPlans.front()->execute(&State);
7660 
7661   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7662   //    predication, updating analyses.
7663   ILV.fixVectorizedLoop();
7664 }
7665 
7666 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7667     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7668   BasicBlock *Latch = OrigLoop->getLoopLatch();
7669 
7670   // We create new control-flow for the vectorized loop, so the original
7671   // condition will be dead after vectorization if it's only used by the
7672   // branch.
7673   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
7674   if (Cmp && Cmp->hasOneUse())
7675     DeadInstructions.insert(Cmp);
7676 
7677   // We create new "steps" for induction variable updates to which the original
7678   // induction variables map. An original update instruction will be dead if
7679   // all its users except the induction variable are dead.
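    // For example (illustrative), given
    //   %iv.next = add nuw nsw i64 %iv, 1
    // whose only users are the %iv phi and the (already dead) latch compare,
    // %iv.next is recorded as dead here.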
7680   for (auto &Induction : *Legal->getInductionVars()) {
7681     PHINode *Ind = Induction.first;
7682     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7683     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7684           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7685         }))
7686       DeadInstructions.insert(IndUpdate);
7687 
    // We also record as "Dead" the type-casting instructions we had identified
7689     // during induction analysis. We don't need any handling for them in the
7690     // vectorized loop because we have proven that, under a proper runtime
7691     // test guarding the vectorized loop, the value of the phi, and the casted
7692     // value of the phi, are the same. The last instruction in this casting chain
7693     // will get its scalar/vector/widened def from the scalar/vector/widened def
7694     // of the respective phi node. Any other casts in the induction def-use chain
7695     // have no other uses outside the phi update chain, and will be ignored.
7696     InductionDescriptor &IndDes = Induction.second;
7697     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7698     DeadInstructions.insert(Casts.begin(), Casts.end());
7699   }
7700 }
7701 
7702 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7703 
7704 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7705 
7706 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7707                                         Instruction::BinaryOps BinOp) {
7708   // When unrolling and the VF is 1, we only need to add a simple scalar.
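  // For example (illustrative), for unrolled part 2 of an integer induction
  // with step %s, this emits %val + 2 * %s.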
7709   Type *Ty = Val->getType();
7710   assert(!Ty->isVectorTy() && "Val must be a scalar");
7711 
7712   if (Ty->isFloatingPointTy()) {
7713     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7714 
7715     // Floating point operations had to be 'fast' to enable the unrolling.
7716     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7717     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7718   }
7719   Constant *C = ConstantInt::get(Ty, StartIdx);
7720   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7721 }
7722 
7723 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
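  // The loop ID is updated to carry the disable string; the resulting
  // metadata has the (illustrative) form:
  //   !0 = distinct !{!0, ..., !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}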
7724   SmallVector<Metadata *, 4> MDs;
7725   // Reserve first location for self reference to the LoopID metadata node.
7726   MDs.push_back(nullptr);
7727   bool IsUnrollMetadata = false;
7728   MDNode *LoopID = L->getLoopID();
7729   if (LoopID) {
7730     // First find existing loop unrolling disable metadata.
7731     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7732       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7733       if (MD) {
7734         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        // Use |= so that a later, unrelated operand does not reset the flag.
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
7737       }
7738       MDs.push_back(LoopID->getOperand(i));
7739     }
7740   }
7741 
7742   if (!IsUnrollMetadata) {
7743     // Add runtime unroll disable metadata.
7744     LLVMContext &Context = L->getHeader()->getContext();
7745     SmallVector<Metadata *, 1> DisableOperands;
7746     DisableOperands.push_back(
7747         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7748     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7749     MDs.push_back(DisableNode);
7750     MDNode *NewLoopID = MDNode::get(Context, MDs);
7751     // Set operand 0 to refer to the loop id itself.
7752     NewLoopID->replaceOperandWith(0, NewLoopID);
7753     L->setLoopID(NewLoopID);
7754   }
7755 }
7756 
7757 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7758     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
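  // Return the predicate's value at Range.Start, clamping Range.End down to
  // the first larger VF (if any) at which the value differs. For example
  // (illustrative), with Range = {1, 16} and a predicate that holds for
  // VF = 1 and 2 but not for VF = 4, Range is clamped to {1, 4} and true is
  // returned.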
7759   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
7760   bool PredicateAtRangeStart = Predicate(Range.Start);
7761 
7762   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
7763     if (Predicate(TmpVF) != PredicateAtRangeStart) {
7764       Range.End = TmpVF;
7765       break;
7766     }
7767 
7768   return PredicateAtRangeStart;
7769 }
7770 
7771 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
7772 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
7773 /// of VF's starting at a given VF and extending it as much as possible. Each
7774 /// vectorization decision can potentially shorten this sub-range during
7775 /// buildVPlan().
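/// For example (illustrative), with \p MinVF = 1 and \p MaxVF = 8 this may
/// build one VPlan covering VF's {1,2} and a second covering {4,8}, if some
/// decision changes between VF = 2 and VF = 4.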
7776 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
7777 
7778   // Collect conditions feeding internal conditional branches; they need to be
7779   // represented in VPlan for it to model masking.
7780   SmallPtrSet<Value *, 1> NeedDef;
7781 
7782   auto *Latch = OrigLoop->getLoopLatch();
7783   for (BasicBlock *BB : OrigLoop->blocks()) {
7784     if (BB == Latch)
7785       continue;
7786     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7787     if (Branch && Branch->isConditional())
7788       NeedDef.insert(Branch->getCondition());
7789   }
7790 
7791   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7792     VFRange SubRange = {VF, MaxVF + 1};
7793     VPlans.push_back(buildVPlan(SubRange, NeedDef));
7794     VF = SubRange.End;
7795   }
7796 }
7797 
7798 VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src,
7799                                                   BasicBlock *Dst,
7800                                                   VPlanPtr &Plan) {
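  // Conceptually, the mask of an edge Src->Dst is
  //   EdgeMask(Src, Dst) = BlockInMask(Src) & Cond
  // where Cond is the condition of Src's terminating branch, negated when Dst
  // is the false successor; a null mask stands for all-one.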
7801   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
7802 
7803   // Look for cached value.
7804   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7805   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
7806   if (ECEntryIt != EdgeMaskCache.end())
7807     return ECEntryIt->second;
7808 
7809   VPValue *SrcMask = createBlockInMask(Src, Plan);
7810 
7811   // The terminator has to be a branch inst!
7812   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
7813   assert(BI && "Unexpected terminator found");
7814 
7815   if (!BI->isConditional())
7816     return EdgeMaskCache[Edge] = SrcMask;
7817 
7818   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
7819   assert(EdgeMask && "No Edge Mask found for condition");
7820 
7821   if (BI->getSuccessor(0) != Dst)
7822     EdgeMask = Builder.createNot(EdgeMask);
7823 
7824   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
7825     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
7826 
7827   return EdgeMaskCache[Edge] = EdgeMask;
7828 }
7829 
7830 VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB,
7831                                                      VPlanPtr &Plan) {
7832   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
7833 
7834   // Look for cached value.
7835   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
7836   if (BCEntryIt != BlockMaskCache.end())
7837     return BCEntryIt->second;
7838 
7839   // All-one mask is modelled as no-mask following the convention for masked
7840   // load/store/gather/scatter. Initialize BlockMask to no-mask.
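  // For example, a memory access under an all-one mask can be emitted as a
  // regular wide load or store, so no mask value needs to be created for it.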
7841   VPValue *BlockMask = nullptr;
7842 
7843   // Loop incoming mask is all-one.
7844   if (OrigLoop->getHeader() == BB)
7845     return BlockMaskCache[BB] = BlockMask;
7846 
7847   // This is the block mask. We OR all incoming edges.
7848   for (auto *Predecessor : predecessors(BB)) {
7849     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
7850     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
7851       return BlockMaskCache[BB] = EdgeMask;
7852 
7853     if (!BlockMask) { // BlockMask has its initialized nullptr value.
7854       BlockMask = EdgeMask;
7855       continue;
7856     }
7857 
7858     BlockMask = Builder.createOr(BlockMask, EdgeMask);
7859   }
7860 
7861   return BlockMaskCache[BB] = BlockMask;
7862 }
7863 
7864 VPInterleaveRecipe *
7865 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I,
7866                                                 VFRange &Range) {
7867   const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I);
7868   if (!IG)
7869     return nullptr;
7870 
7871   // Now check if IG is relevant for VF's in the given range.
7872   auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
7873     return [=](unsigned VF) -> bool {
7874       return (VF >= 2 && // Query is illegal for VF == 1
7875               CM.getWideningDecision(I, VF) ==
7876                   LoopVectorizationCostModel::CM_Interleave);
7877     };
7878   };
7879   if (!getDecisionAndClampRange(isIGMember(I), Range))
7880     return nullptr;
7881 
  // I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
  // range. If it is the primary member of the IG, construct a
  // VPInterleaveRecipe. Otherwise it is an adjunct member of the IG, and no
  // recipe should be constructed for it.
7885   assert(I == IG->getInsertPos() &&
7886          "Generating a recipe for an adjunct member of an interleave group");
7887 
7888   return new VPInterleaveRecipe(IG);
7889 }
7890 
7891 VPWidenMemoryInstructionRecipe *
7892 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range,
7893                                            VPlanPtr &Plan) {
7894   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
7895     return nullptr;
7896 
7897   auto willWiden = [&](unsigned VF) -> bool {
7898     if (VF == 1)
7899       return false;
7900     if (CM.isScalarAfterVectorization(I, VF) ||
7901         CM.isProfitableToScalarize(I, VF))
7902       return false;
7903     LoopVectorizationCostModel::InstWidening Decision =
7904         CM.getWideningDecision(I, VF);
7905     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
7906            "CM decision should be taken at this point.");
7907     assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
7908            "Interleave memory opportunity should be caught earlier.");
7909     return Decision != LoopVectorizationCostModel::CM_Scalarize;
7910   };
7911 
7912   if (!getDecisionAndClampRange(willWiden, Range))
7913     return nullptr;
7914 
7915   VPValue *Mask = nullptr;
7916   if (Legal->isMaskRequired(I))
7917     Mask = createBlockInMask(I->getParent(), Plan);
7918 
7919   return new VPWidenMemoryInstructionRecipe(*I, Mask);
7920 }
7921 
7922 VPWidenIntOrFpInductionRecipe *
7923 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I,
7924                                                  VFRange &Range) {
7925   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
7926     // Check if this is an integer or fp induction. If so, build the recipe that
7927     // produces its scalar and vector values.
7928     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
7929     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
7930         II.getKind() == InductionDescriptor::IK_FpInduction)
7931       return new VPWidenIntOrFpInductionRecipe(Phi);
7932 
7933     return nullptr;
7934   }
7935 
7936   // Optimize the special case where the source is a constant integer
7937   // induction variable. Notice that we can only optimize the 'trunc' case
7938   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7939   // (c) other casts depend on pointer size.
7940 
7941   // Determine whether \p K is a truncation based on an induction variable that
7942   // can be optimized.
7943   auto isOptimizableIVTruncate =
7944       [&](Instruction *K) -> std::function<bool(unsigned)> {
7945     return
7946         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
7947   };
7948 
7949   if (isa<TruncInst>(I) &&
7950       getDecisionAndClampRange(isOptimizableIVTruncate(I), Range))
7951     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
7952                                              cast<TruncInst>(I));
7953   return nullptr;
7954 }
7955 
7956 VPBlendRecipe *
7957 LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) {
7958   PHINode *Phi = dyn_cast<PHINode>(I);
7959   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
7960     return nullptr;
7961 
7962   // We know that all PHIs in non-header blocks are converted into selects, so
7963   // we don't have to worry about the insertion order and we can just use the
7964   // builder. At this point we generate the predication tree. There may be
7965   // duplications since this is a simple recursive scan, but future
7966   // optimizations will clean it up.
7967 
7968   SmallVector<VPValue *, 2> Masks;
7969   unsigned NumIncoming = Phi->getNumIncomingValues();
7970   for (unsigned In = 0; In < NumIncoming; In++) {
7971     VPValue *EdgeMask =
7972       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
7973     assert((EdgeMask || NumIncoming == 1) &&
7974            "Multiple predecessors with one having a full mask");
7975     if (EdgeMask)
7976       Masks.push_back(EdgeMask);
7977   }
7978   return new VPBlendRecipe(Phi, Masks);
7979 }
7980 
7981 bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
7982                                           VFRange &Range) {
7983   if (Legal->isScalarWithPredication(I))
7984     return false;
7985 
7986   auto IsVectorizableOpcode = [](unsigned Opcode) {
7987     switch (Opcode) {
7988     case Instruction::Add:
7989     case Instruction::And:
7990     case Instruction::AShr:
7991     case Instruction::BitCast:
7992     case Instruction::Br:
7993     case Instruction::Call:
7994     case Instruction::FAdd:
7995     case Instruction::FCmp:
7996     case Instruction::FDiv:
7997     case Instruction::FMul:
7998     case Instruction::FPExt:
7999     case Instruction::FPToSI:
8000     case Instruction::FPToUI:
8001     case Instruction::FPTrunc:
8002     case Instruction::FRem:
8003     case Instruction::FSub:
8004     case Instruction::GetElementPtr:
8005     case Instruction::ICmp:
8006     case Instruction::IntToPtr:
8007     case Instruction::Load:
8008     case Instruction::LShr:
8009     case Instruction::Mul:
8010     case Instruction::Or:
8011     case Instruction::PHI:
8012     case Instruction::PtrToInt:
8013     case Instruction::SDiv:
8014     case Instruction::Select:
8015     case Instruction::SExt:
8016     case Instruction::Shl:
8017     case Instruction::SIToFP:
8018     case Instruction::SRem:
8019     case Instruction::Store:
8020     case Instruction::Sub:
8021     case Instruction::Trunc:
8022     case Instruction::UDiv:
8023     case Instruction::UIToFP:
8024     case Instruction::URem:
8025     case Instruction::Xor:
8026     case Instruction::ZExt:
8027       return true;
8028     }
8029     return false;
8030   };
8031 
8032   if (!IsVectorizableOpcode(I->getOpcode()))
8033     return false;
8034 
8035   if (CallInst *CI = dyn_cast<CallInst>(I)) {
8036     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8037     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8038                ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
8039       return false;
8040   }
8041 
8042   auto willWiden = [&](unsigned VF) -> bool {
8043     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
8044                              CM.isProfitableToScalarize(I, VF)))
8045       return false;
8046     if (CallInst *CI = dyn_cast<CallInst>(I)) {
8047       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      // The following case may be scalarized depending on the VF.
      // The flag indicates whether we use an intrinsic or a plain call for
      // the vectorized version of the instruction, i.e., whether the
      // intrinsic call is cheaper than the library call.
8052       bool NeedToScalarize;
8053       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
8054       bool UseVectorIntrinsic =
8055           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
8056       return UseVectorIntrinsic || !NeedToScalarize;
8057     }
8058     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
8059       assert(CM.getWideningDecision(I, VF) ==
8060                  LoopVectorizationCostModel::CM_Scalarize &&
8061              "Memory widening decisions should have been taken care by now");
8062       return false;
8063     }
8064     return true;
8065   };
8066 
8067   if (!getDecisionAndClampRange(willWiden, Range))
8068     return false;
8069 
8070   // Success: widen this instruction. We optimize the common case where
8071   // consecutive instructions can be represented by a single recipe.
8072   if (!VPBB->empty()) {
8073     VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
8074     if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
8075       return true;
8076   }
8077 
8078   VPBB->appendRecipe(new VPWidenRecipe(I));
8079   return true;
8080 }
8081 
8082 VPBasicBlock *LoopVectorizationPlanner::handleReplication(
8083     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8084     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8085     VPlanPtr &Plan) {
8086   bool IsUniform = getDecisionAndClampRange(
8087       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
8088       Range);
8089 
8090   bool IsPredicated = Legal->isScalarWithPredication(I);
8091   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
8092 
8093   // Find if I uses a predicated instruction. If so, it will use its scalar
8094   // value. Avoid hoisting the insert-element which packs the scalar value into
8095   // a vector value, as that happens iff all users use the vector value.
8096   for (auto &Op : I->operands())
8097     if (auto *PredInst = dyn_cast<Instruction>(Op))
8098       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8099         PredInst2Recipe[PredInst]->setAlsoPack(false);
8100 
8101   // Finalize the recipe for Instr, first if it is not predicated.
8102   if (!IsPredicated) {
8103     DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8104     VPBB->appendRecipe(Recipe);
8105     return VPBB;
8106   }
8107   DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8108   assert(VPBB->getSuccessors().empty() &&
8109          "VPBB has successors when handling predicated replication.");
8110   // Record predicated instructions for above packing optimizations.
8111   PredInst2Recipe[I] = Recipe;
8112   VPBlockBase *Region =
8113     VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan));
8114   return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock()));
8115 }
8116 
8117 VPRegionBlock *
8118 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr,
8119                                                 VPRecipeBase *PredRecipe,
8120                                                 VPlanPtr &Plan) {
8121   // Instructions marked for predication are replicated and placed under an
8122   // if-then construct to prevent side-effects.
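  // The region has the (illustrative) shape:
  //
  //     pred.<opcode>.entry        // branch on the block mask
  //       |        \
  //       |   pred.<opcode>.if     // replicated instruction
  //       |        /
  //     pred.<opcode>.continue     // phi merging the predicated value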
8123 
8124   // Generate recipes to compute the block mask for this region.
8125   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8126 
8127   // Build the triangular if-then region.
8128   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8129   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8130   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8131   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8132   auto *PHIRecipe =
8133       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
8134   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8135   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8136   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8137 
8138   // Note: first set Entry as region entry and then connect successors starting
8139   // from it in order, to propagate the "parent" of each VPBasicBlock.
8140   Entry->setTwoSuccessors(Pred, Exit);
8141   Pred->setOneSuccessor(Exit);
8142 
8143   return Region;
8144 }
8145 
8146 LoopVectorizationPlanner::VPlanPtr
8147 LoopVectorizationPlanner::buildVPlan(VFRange &Range,
8148                                      const SmallPtrSetImpl<Value *> &NeedDef) {
8149   EdgeMaskCache.clear();
8150   BlockMaskCache.clear();
8151   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8152   DenseMap<Instruction *, Instruction *> SinkAfterInverse;
8153 
8154   // Collect instructions from the original loop that will become trivially dead
8155   // in the vectorized loop. We don't need to vectorize these instructions. For
8156   // example, original induction update instructions can become dead because we
8157   // separately emit induction "steps" when generating code for the new loop.
8158   // Similarly, we create a new latch condition when setting up the structure
8159   // of the new loop, so the old one can become dead.
8160   SmallPtrSet<Instruction *, 4> DeadInstructions;
8161   collectTriviallyDeadInstructions(DeadInstructions);
8162 
8163   // Hold a mapping from predicated instructions to their recipes, in order to
8164   // fix their AlsoPack behavior if a user is determined to replicate and use a
8165   // scalar instead of vector value.
8166   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8167 
8168   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8169   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8170   auto Plan = llvm::make_unique<VPlan>(VPBB);
8171 
8172   // Represent values that will have defs inside VPlan.
8173   for (Value *V : NeedDef)
8174     Plan->addVPValue(V);
8175 
8176   // Scan the body of the loop in a topological order to visit each basic block
8177   // after having visited its predecessor basic blocks.
8178   LoopBlocksDFS DFS(OrigLoop);
8179   DFS.perform(LI);
8180 
8181   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8182     // Relevant instructions from basic block BB will be grouped into VPRecipe
8183     // ingredients and fill a new VPBasicBlock.
8184     unsigned VPBBsForBB = 0;
8185     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8186     VPBB->setOneSuccessor(FirstVPBBForBB);
8187     VPBB = FirstVPBBForBB;
8188     Builder.setInsertPoint(VPBB);
8189 
8190     std::vector<Instruction *> Ingredients;
8191 
8192     // Organize the ingredients to vectorize from current basic block in the
8193     // right order.
8194     for (Instruction &I : *BB) {
8195       Instruction *Instr = &I;
8196 
8197       // First filter out irrelevant instructions, to ensure no recipes are
8198       // built for them.
8199       if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) ||
8200           DeadInstructions.count(Instr))
8201         continue;
8202 
      // If Instr is an adjunct (non-insert-position) member of an
      // InterleaveGroup for Range.Start, do not construct any recipe for it.
8205       const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr);
8206       if (IG && Instr != IG->getInsertPos() &&
8207           Range.Start >= 2 && // Query is illegal for VF == 1
8208           CM.getWideningDecision(Instr, Range.Start) ==
8209               LoopVectorizationCostModel::CM_Interleave) {
8210         if (SinkAfterInverse.count(Instr))
8211           Ingredients.push_back(SinkAfterInverse.find(Instr)->second);
8212         continue;
8213       }
8214 
8215       // Move instructions to handle first-order recurrences, step 1: avoid
8216       // handling this instruction until after we've handled the instruction it
8217       // should follow.
8218       auto SAIt = SinkAfter.find(Instr);
8219       if (SAIt != SinkAfter.end()) {
8220         DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second
8221                      << " to vectorize a 1st order recurrence.\n");
8222         SinkAfterInverse[SAIt->second] = Instr;
8223         continue;
8224       }
8225 
8226       Ingredients.push_back(Instr);
8227 
8228       // Move instructions to handle first-order recurrences, step 2: push the
8229       // instruction to be sunk at its insertion point.
8230       auto SAInvIt = SinkAfterInverse.find(Instr);
8231       if (SAInvIt != SinkAfterInverse.end())
8232         Ingredients.push_back(SAInvIt->second);
8233     }
8234 
8235     // Introduce each ingredient into VPlan.
8236     for (Instruction *Instr : Ingredients) {
8237       VPRecipeBase *Recipe = nullptr;
8238 
8239       // Check if Instr should belong to an interleave memory recipe, or already
8240       // does. In the latter case Instr is irrelevant.
8241       if ((Recipe = tryToInterleaveMemory(Instr, Range))) {
8242         VPBB->appendRecipe(Recipe);
8243         continue;
8244       }
8245 
8246       // Check if Instr is a memory operation that should be widened.
8247       if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
8248         VPBB->appendRecipe(Recipe);
8249         continue;
8250       }
8251 
8252       // Check if Instr should form some PHI recipe.
8253       if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
8254         VPBB->appendRecipe(Recipe);
8255         continue;
8256       }
8257       if ((Recipe = tryToBlend(Instr, Plan))) {
8258         VPBB->appendRecipe(Recipe);
8259         continue;
8260       }
8261       if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
8262         VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
8263         continue;
8264       }
8265 
8266       // Check if Instr is to be widened by a general VPWidenRecipe, after
8267       // having first checked for specific widening recipes that deal with
8268       // Interleave Groups, Inductions and Phi nodes.
8269       if (tryToWiden(Instr, VPBB, Range))
8270         continue;
8271 
8272       // Otherwise, if all widening options failed, Instruction is to be
8273       // replicated. This may create a successor for VPBB.
8274       VPBasicBlock *NextVPBB =
8275         handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan);
8276       if (NextVPBB != VPBB) {
8277         VPBB = NextVPBB;
8278         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8279                                     : "");
8280       }
8281     }
8282   }
8283 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last VPBB created above,
  // reflecting original basic blocks that contained no recipes.
8287   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8288   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8289   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8290   PreEntry->disconnectSuccessor(Entry);
8291   delete PreEntry;
8292 
8293   std::string PlanName;
8294   raw_string_ostream RSO(PlanName);
8295   unsigned VF = Range.Start;
8296   Plan->addVF(VF);
8297   RSO << "Initial VPlan for VF={" << VF;
8298   for (VF *= 2; VF < Range.End; VF *= 2) {
8299     Plan->addVF(VF);
8300     RSO << "," << VF;
8301   }
8302   RSO << "},UF>=1";
8303   RSO.flush();
8304   Plan->setName(PlanName);
8305 
8306   return Plan;
8307 }
8308 
8309 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
8310   O << " +\n"
8311     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8312   IG->getInsertPos()->printAsOperand(O, false);
8313   O << "\\l\"";
8314   for (unsigned i = 0; i < IG->getFactor(); ++i)
8315     if (Instruction *I = IG->getMember(i))
8316       O << " +\n"
8317         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
8318 }
8319 
8320 void VPWidenRecipe::execute(VPTransformState &State) {
8321   for (auto &Instr : make_range(Begin, End))
8322     State.ILV->widenInstruction(Instr);
8323 }
8324 
8325 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
8326   assert(!State.Instance && "Int or FP induction being replicated.");
8327   State.ILV->widenIntOrFpInduction(IV, Trunc);
8328 }
8329 
8330 void VPWidenPHIRecipe::execute(VPTransformState &State) {
8331   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
8332 }
8333 
8334 void VPBlendRecipe::execute(VPTransformState &State) {
8335   State.ILV->setDebugLocFromInst(State.Builder, Phi);
8336   // We know that all PHIs in non-header blocks are converted into
8337   // selects, so we don't have to worry about the insertion order and we
8338   // can just use the builder.
8339   // At this point we generate the predication tree. There may be
8340   // duplications since this is a simple recursive scan, but future
8341   // optimizations will clean it up.
8342 
8343   unsigned NumIncoming = Phi->getNumIncomingValues();
8344 
  assert((User || NumIncoming == 1) &&
         "Multiple predecessors with one having a full mask");
8347   // Generate a sequence of selects of the form:
8348   // SELECT(Mask3, In3,
8349   //      SELECT(Mask2, In2,
8350   //                   ( ...)))
8351   InnerLoopVectorizer::VectorParts Entry(State.UF);
8352   for (unsigned In = 0; In < NumIncoming; ++In) {
8353     for (unsigned Part = 0; Part < State.UF; ++Part) {
8354       // We might have single edge PHIs (blocks) - use an identity
8355       // 'select' for the first PHI operand.
8356       Value *In0 =
8357           State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
8358       if (In == 0)
8359         Entry[Part] = In0; // Initialize with the first incoming value.
8360       else {
8361         // Select between the current value and the previous incoming edge
8362         // based on the incoming mask.
8363         Value *Cond = State.get(User->getOperand(In), Part);
8364         Entry[Part] =
8365             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
8366       }
8367     }
8368   }
8369   for (unsigned Part = 0; Part < State.UF; ++Part)
8370     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
8371 }
8372 
8373 void VPInterleaveRecipe::execute(VPTransformState &State) {
8374   assert(!State.Instance && "Interleave group being replicated.");
8375   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
8376 }
8377 
8378 void VPReplicateRecipe::execute(VPTransformState &State) {
8379   if (State.Instance) { // Generate a single instance.
8380     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
8381     // Insert scalar instance packing it into a vector.
8382     if (AlsoPack && State.VF > 1) {
8383       // If we're constructing lane 0, initialize to start from undef.
8384       if (State.Instance->Lane == 0) {
8385         Value *Undef =
8386             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
8387         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
8388       }
8389       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
8390     }
8391     return;
8392   }
8393 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
8397   unsigned EndLane = IsUniform ? 1 : State.VF;
8398   for (unsigned Part = 0; Part < State.UF; ++Part)
8399     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8400       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
8401 }
8402 
8403 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
8404   assert(State.Instance && "Branch on Mask works only on single instance.");
8405 
8406   unsigned Part = State.Instance->Part;
8407   unsigned Lane = State.Instance->Lane;
8408 
8409   Value *ConditionBit = nullptr;
8410   if (!User) // Block in mask is all-one.
8411     ConditionBit = State.Builder.getTrue();
8412   else {
8413     VPValue *BlockInMask = User->getOperand(0);
8414     ConditionBit = State.get(BlockInMask, Part);
8415     if (ConditionBit->getType()->isVectorTy())
8416       ConditionBit = State.Builder.CreateExtractElement(
8417           ConditionBit, State.Builder.getInt32(Lane));
8418   }
8419 
8420   // Replace the temporary unreachable terminator with a new conditional branch,
8421   // whose two destinations will be set later when they are created.
8422   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
8423   assert(isa<UnreachableInst>(CurrentTerminator) &&
8424          "Expected to replace unreachable terminator with conditional branch.");
8425   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
8426   CondBr->setSuccessor(0, nullptr);
8427   ReplaceInstWithInst(CurrentTerminator, CondBr);
8428 }
8429 
8430 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
8431   assert(State.Instance && "Predicated instruction PHI works per instance.");
8432   Instruction *ScalarPredInst = cast<Instruction>(
8433       State.ValueMap.getScalarValue(PredInst, *State.Instance));
8434   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
8435   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
8436   assert(PredicatingBB && "Predicated block has no single predecessor.");
8437 
8438   // By current pack/unpack logic we need to generate only a single phi node: if
8439   // a vector value for the predicated instruction exists at this point it means
8440   // the instruction has vector users only, and a phi for the vector value is
8441   // needed. In this case the recipe of the predicated instruction is marked to
8442   // also do that packing, thereby "hoisting" the insert-element sequence.
8443   // Otherwise, a phi node for the scalar value is needed.
8444   unsigned Part = State.Instance->Part;
8445   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
8446     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
8447     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
8448     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
8449     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
8450     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
8451     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
8452   } else {
8453     Type *PredInstType = PredInst->getType();
8454     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
8455     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
8456     Phi->addIncoming(ScalarPredInst, PredicatedBB);
8457     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
8458   }
8459 }
8460 
8461 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
8462   if (!User)
8463     return State.ILV->vectorizeMemoryInstruction(&Instr);
8464 
8465   // Last (and currently only) operand is a mask.
8466   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
8467   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
8468   for (unsigned Part = 0; Part < State.UF; ++Part)
8469     MaskValues[Part] = State.get(Mask, Part);
8470   State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
8471 }
8472 
8473 bool LoopVectorizePass::processLoop(Loop *L) {
8474   assert(L->empty() && "Only process inner loops.");
8475 
8476 #ifndef NDEBUG
8477   const std::string DebugLocStr = getDebugLocString(L);
8478 #endif /* NDEBUG */
8479 
8480   DEBUG(dbgs() << "\nLV: Checking a loop in \""
8481                << L->getHeader()->getParent()->getName() << "\" from "
8482                << DebugLocStr << "\n");
8483 
8484   LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
8485 
8486   DEBUG(dbgs() << "LV: Loop hints:"
8487                << " force="
8488                << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
8489                        ? "disabled"
8490                        : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
8491                               ? "enabled"
8492                               : "?"))
8493                << " width=" << Hints.getWidth()
8494                << " unroll=" << Hints.getInterleave() << "\n");
8495 
  // The function containing the loop.
8497   Function *F = L->getHeader()->getParent();
8498 
8499   // Looking at the diagnostic output is the only way to determine if a loop
8500   // was vectorized (other than looking at the IR or machine code), so it
8501   // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
8506 
8507   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
8508     DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8509     return false;
8510   }
8511 
8512   PredicatedScalarEvolution PSE(*SE, *L);
8513 
8514   // Check if it is legal to vectorize the loop.
8515   LoopVectorizationRequirements Requirements(*ORE);
8516   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
8517                                 &Requirements, &Hints);
8518   if (!LVL.canVectorize()) {
8519     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8520     emitMissedWarning(F, L, Hints, ORE);
8521     return false;
8522   }
8523 
8524   // Check the function attributes to find out if this function should be
8525   // optimized for size.
8526   bool OptForSize =
8527       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
8528 
8529   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8530   // count by optimizing for size, to minimize overheads.
8531   unsigned ExpectedTC = SE->getSmallConstantMaxTripCount(L);
8532   bool HasExpectedTC = (ExpectedTC > 0);
8533 
8534   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
8535     auto EstimatedTC = getLoopEstimatedTripCount(L);
8536     if (EstimatedTC) {
8537       ExpectedTC = *EstimatedTC;
8538       HasExpectedTC = true;
8539     }
8540   }
8541 
8542   if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
8543     DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8544                  << "This loop is worth vectorizing only if no scalar "
8545                  << "iteration overheads are incurred.");
8546     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8547       DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8548     else {
8549       DEBUG(dbgs() << "\n");
8550       // Loops with a very small trip count are considered for vectorization
8551       // under OptForSize, thereby making sure the cost of their loop body is
8552       // dominant, free of runtime guards and scalar iteration overheads.
8553       OptForSize = true;
8554     }
8555   }
8556 
8557   // Check the function attributes to see if implicit floats are allowed.
8558   // FIXME: This check doesn't seem possibly correct -- what if the loop is
8559   // an integer loop and the vector instructions selected are purely integer
8560   // vector instructions?
8561   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8562     DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
8563                     "attribute is used.\n");
8564     ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
8565                                    "NoImplicitFloat", L)
8566               << "loop not vectorized due to NoImplicitFloat attribute");
8567     emitMissedWarning(F, L, Hints, ORE);
8568     return false;
8569   }
8570 
8571   // Check if the target supports potentially unsafe FP vectorization.
8572   // FIXME: Add a check for the type of safety issue (denormal, signaling)
8573   // for the target we're vectorizing for, to make sure none of the
8574   // additional fp-math flags can help.
8575   if (Hints.isPotentiallyUnsafe() &&
8576       TTI->isFPVectorizationPotentiallyUnsafe()) {
8577     DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
8578     ORE->emit(
8579         createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
8580         << "loop not vectorized due to unsafe FP support.");
8581     emitMissedWarning(F, L, Hints, ORE);
8582     return false;
8583   }
8584 
8585   // Use the cost model.
8586   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
8587                                 &Hints);
8588   CM.collectValuesToIgnore();
8589 
8590   // Use the planner for vectorization.
8591   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
8592 
8593   // Get user vectorization factor.
8594   unsigned UserVF = Hints.getWidth();
8595 
8596   // Plan how to best vectorize, return the best VF and its cost.
8597   LoopVectorizationCostModel::VectorizationFactor VF =
8598       LVP.plan(OptForSize, UserVF);
8599 
8600   // Select the interleave count.
8601   unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
8602 
8603   // Get user interleave count.
8604   unsigned UserIC = Hints.getInterleave();
8605 
8606   // Identify the diagnostic messages that should be produced.
8607   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8608   bool VectorizeLoop = true, InterleaveLoop = true;
8609   if (Requirements.doesNotMeet(F, L, Hints)) {
8610     DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
8611                     "requirements.\n");
8612     emitMissedWarning(F, L, Hints, ORE);
8613     return false;
8614   }
8615 
8616   if (VF.Width == 1) {
8617     DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8618     VecDiagMsg = std::make_pair(
8619         "VectorizationNotBeneficial",
8620         "the cost-model indicates that vectorization is not beneficial");
8621     VectorizeLoop = false;
8622   }
8623 
8624   if (IC == 1 && UserIC <= 1) {
8625     // Tell the user interleaving is not beneficial.
8626     DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8627     IntDiagMsg = std::make_pair(
8628         "InterleavingNotBeneficial",
8629         "the cost-model indicates that interleaving is not beneficial");
8630     InterleaveLoop = false;
8631     if (UserIC == 1) {
8632       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8633       IntDiagMsg.second +=
8634           " and is explicitly disabled or interleave count is set to 1";
8635     }
8636   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
8638     DEBUG(dbgs()
8639           << "LV: Interleaving is beneficial but is explicitly disabled.");
8640     IntDiagMsg = std::make_pair(
8641         "InterleavingBeneficialButDisabled",
8642         "the cost-model indicates that interleaving is beneficial "
8643         "but is explicitly disabled or interleave count is set to 1");
8644     InterleaveLoop = false;
8645   }
8646 
8647   // Override IC if user provided an interleave count.
8648   IC = UserIC > 0 ? UserIC : IC;
8649 
8650   // Emit diagnostic messages, if any.
8651   const char *VAPassName = Hints.vectorizeAnalysisPassName();
8652   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
8654     ORE->emit([&]() {
8655       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
8656                                       L->getStartLoc(), L->getHeader())
8657              << VecDiagMsg.second;
8658     });
8659     ORE->emit([&]() {
8660       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
8661                                       L->getStartLoc(), L->getHeader())
8662              << IntDiagMsg.second;
8663     });
8664     return false;
8665   } else if (!VectorizeLoop && InterleaveLoop) {
8666     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8667     ORE->emit([&]() {
8668       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
8669                                         L->getStartLoc(), L->getHeader())
8670              << VecDiagMsg.second;
8671     });
8672   } else if (VectorizeLoop && !InterleaveLoop) {
8673     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
8674                  << DebugLocStr << '\n');
8675     ORE->emit([&]() {
8676       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
8677                                         L->getStartLoc(), L->getHeader())
8678              << IntDiagMsg.second;
8679     });
8680   } else if (VectorizeLoop && InterleaveLoop) {
8681     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
8682                  << DebugLocStr << '\n');
8683     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8684   }
8685 
8686   LVP.setBestPlan(VF.Width, IC);
8687 
8688   using namespace ore;
8689 
8690   if (!VectorizeLoop) {
8691     assert(IC > 1 && "interleave count should not be 1 or 0");
8692     // If we decided that it is not legal to vectorize the loop, then
8693     // interleave it.
8694     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
8695                                &CM);
8696     LVP.executePlan(Unroller, DT);
8697 
8698     ORE->emit([&]() {
8699       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8700                                 L->getHeader())
8701              << "interleaved loop (interleaved count: "
8702              << NV("InterleaveCount", IC) << ")";
8703     });
8704   } else {
8705     // If we decided that it is *legal* to vectorize the loop, then do it.
8706     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
8707                            &LVL, &CM);
8708     LVP.executePlan(LB, DT);
8709     ++LoopsVectorized;
8710 
8711     // Add metadata to disable runtime unrolling a scalar loop when there are
8712     // no runtime checks about strides and memory. A scalar loop that is
8713     // rarely used is not worth unrolling.
8714     if (!LB.areSafetyChecksAdded())
8715       AddRuntimeUnrollDisableMetaData(L);
8716 
8717     // Report the vectorization decision.
8718     ORE->emit([&]() {
8719       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
8720                                 L->getHeader())
8721              << "vectorized loop (vectorization width: "
8722              << NV("VectorizationFactor", VF.Width)
8723              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
8724     });
8725   }
8726 
8727   // Mark the loop as already vectorized to avoid vectorizing again.
8728   Hints.setAlreadyVectorized();
8729 
8730   DEBUG(verifyFunction(*L->getHeader()->getParent()));
8731   return true;
8732 }
8733 
8734 bool LoopVectorizePass::runImpl(
8735     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
8736     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
8737     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
8738     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
8739     OptimizationRemarkEmitter &ORE_) {
8740   SE = &SE_;
8741   LI = &LI_;
8742   TTI = &TTI_;
8743   DT = &DT_;
8744   BFI = &BFI_;
8745   TLI = TLI_;
8746   AA = &AA_;
8747   AC = &AC_;
8748   GetLAA = &GetLAA_;
8749   DB = &DB_;
8750   ORE = &ORE_;
8751 
8752   // Don't attempt if
8753   // 1. the target claims to have no vector registers, and
8754   // 2. interleaving won't help ILP.
8755   //
8756   // The second condition is necessary because, even if the target has no
8757   // vector registers, loop vectorization may still enable scalar
8758   // interleaving.
8759   if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
8760     return false;
8761 
8762   bool Changed = false;
8763 
8764   // The vectorizer requires loops to be in simplified form.
8765   // Since simplification may add new inner loops, it has to run before the
8766   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
8768   // vectorized.
8769   for (auto &L : *LI)
8770     Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
8771 
8772   // Build up a worklist of inner-loops to vectorize. This is necessary as
8773   // the act of vectorizing or partially unrolling a loop creates new loops
8774   // and can invalidate iterators across the loops.
8775   SmallVector<Loop *, 8> Worklist;
8776 
8777   for (Loop *L : *LI)
8778     addAcyclicInnerLoop(*L, Worklist);
8779 
8780   LoopsAnalyzed += Worklist.size();
8781 
8782   // Now walk the identified inner loops.
8783   while (!Worklist.empty()) {
8784     Loop *L = Worklist.pop_back_val();
8785 
8786     // For the inner loops we actually process, form LCSSA to simplify the
8787     // transform.
8788     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8789 
8790     Changed |= processLoop(L);
8791   }
8792 
8793   // Process each loop nest in the function.
8794   return Changed;
8795 }
8796 
8797 PreservedAnalyses LoopVectorizePass::run(Function &F,
8798                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
8827