//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
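//
// For example, with a vectorization factor of four, a scalar iteration such
// as (illustrative IR):
//
//   %v = load i32, i32* %p
//   %r = add i32 %v, 1
//
// becomes a single wide iteration:
//
//   %wide.v = load <4 x i32>, <4 x i32>* %wide.p
//   %wide.r = add <4 x i32> %wide.v, <i32 1, i32 1, i32 1, i32 1>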
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));
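
// For example, this threshold can be relaxed when testing via the flag
// defined above (an illustrative invocation; assumes the legacy pass
// manager):
//
//   opt -loop-vectorize -vectorizer-min-trip-count=4 -S in.ll -o out.ll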

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
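
// For example (illustrative):
//   ToVectorTy(i32,  /*VF=*/4) ==> <4 x i32>
//   ToVectorTy(i32,  /*VF=*/1) ==> i32
//   ToVectorTy(void, /*VF=*/4) ==> void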

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
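
// For example (illustrative, assuming a typical data layout): i1 has an
// allocated size of one byte, while <8 x i1> has a store size of one byte,
// so 8 * 1 != 1 and i1 is irregular at VF = 8. In contrast, for i32 at
// VF = 4, 4 * 4 bytes equals the 16-byte store size of <4 x i32>, so i32 is
// regular.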

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }
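
// For example, a sketch of how this discount is applied when costing a
// predicated block (illustrative, not an exact excerpt of the cost model):
//
//   unsigned BlockCost = ...;                  // cost if run every iteration
//   BlockCost /= getReciprocalPredBlockProb(); // expected cost per iteration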

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}
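
// A typical (illustrative) use is to wrap a newly created floating-point
// operation:
//
//   Value *Sum = addFastMathFlag(Builder.CreateFAdd(X, Y));
//
// Values that are not FPMathOperators are returned unchanged.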

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a scalar instance of \p Instr for the part and lane
  /// given by \p Instance, predicating the generated instructions if
  /// \p IfPredicateInstr is true.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
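  ///
  /// For example (illustrative IR), if a definition %d was scalarized at
  /// VF = 4 and one of its uses requires a vector, the scalar lanes are
  /// packed on-demand with an insertelement sequence:
  ///
  ///   %v.0 = insertelement <4 x i32> undef, i32 %d.0, i32 0
  ///   %v.1 = insertelement <4 x i32> %v.0,  i32 %d.1, i32 1
  ///   %v.2 = insertelement <4 x i32> %v.1,  i32 %d.2, i32 2
  ///   %v.3 = insertelement <4 x i32> %v.2,  i32 %d.3, i32 3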
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handle real values
  /// that were defined inside the loop; such a PHI should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
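///
/// When such a group is vectorized, it is conceptually lowered to a single
/// wide load (or store) plus shufflevectors, e.g. for a factor-2 load group
/// at VF = 4 (illustrative IR):
///
///   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
///   %even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef,
///                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
///   %odd  = shufflevector <8 x i32> %wide.vec, <8 x i32> undef,
///                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>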
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader; it can be
  /// negative if the new member becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the smallest and largest keys must be less than
      // the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
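
  // For example (illustrative): if the factor-4 load group described in the
  // class comment was created with A[i+1] as its leader, inserting A[i] uses
  // Index == -1 and A[i] becomes the new leader (SmallestKey drops to -1),
  // while inserting A[i+3] uses Index == 2 (LargestKey becomes 2).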

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata like
  /// addNewMetadata does.  To do that we need to compute the intersection of
  /// the noalias info from all members.
  void addMetadata(Instruction *NewInst) const {
    SmallVector<Value *, 4> VL;
    std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                   [](std::pair<int, Instruction *> p) { return p.second; });
    propagateMetadata(NewInst, VL);
  }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

} // end namespace llvm

namespace {

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is pointless, since vectorization of
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI,
                        const LoopAccessInfo *LAI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Symbolic strides are substituted using the known strides
  /// collected by LoopAccessInfo.
  void analyzeInterleaving();

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// \brief A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false only when
  /// reordering would be needed but is prevented because \p A and \p B may be
  /// dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

} // end anonymous namespace

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  LH.emitRemarkWithHints();

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedVectorization",
                    L->getStartLoc(), L->getHeader())
                << "loop not vectorized: "
                << "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
                    L->getHeader())
                << "loop not interleaved: "
                << "failed explicitly specified loop interleaving");
  }
}
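
// For example, a loop annotated with "#pragma clang loop vectorize(enable)"
// (i.e. carrying llvm.loop.vectorize.enable metadata) arrives here with
// FK_Enabled, so the failure is reported as a diagnostic rather than as a
// plain missed-optimization remark.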

namespace llvm {

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);
1209 
1210   /// \brief A struct that represents some properties of the register usage
1211   /// of a loop.
1212   struct RegisterUsage {
1213     /// Holds the number of loop invariant values that are used in the loop.
1214     unsigned LoopInvariantRegs;
1215 
1216     /// Holds the maximum number of concurrent live intervals in the loop.
1217     unsigned MaxLocalUsers;
1218   };
1219 
1220   /// \return Returns information about the register usages of the loop for the
1221   /// given vectorization factors.
1222   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1223 
1224   /// Collect values we want to ignore in the cost model.
1225   void collectValuesToIgnore();
1226 
1227   /// \returns The smallest bitwidth each instruction can be represented with.
1228   /// The vector equivalents of these instructions should be truncated to this
1229   /// type.
1230   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1231     return MinBWs;
1232   }
1233 
1234   /// \returns True if it is more profitable to scalarize instruction \p I for
1235   /// vectorization factor \p VF.
1236   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1237     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1238     auto Scalars = InstsToScalarize.find(VF);
1239     assert(Scalars != InstsToScalarize.end() &&
1240            "VF not yet analyzed for scalarization profitability");
1241     return Scalars->second.count(I);
1242   }
1243 
1244   /// Returns true if \p I is known to be uniform after vectorization.
1245   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1246     if (VF == 1)
1247       return true;
1248     assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1249     auto UniformsPerVF = Uniforms.find(VF);
1250     return UniformsPerVF->second.count(I);
1251   }
1252 
1253   /// Returns true if \p I is known to be scalar after vectorization.
1254   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1255     if (VF == 1)
1256       return true;
1257     assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1258     auto ScalarsPerVF = Scalars.find(VF);
1259     return ScalarsPerVF->second.count(I);
1260   }
1261 
1262   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1263   /// for vectorization factor \p VF.
1264   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1265     return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1266            !isScalarAfterVectorization(I, VF);
1267   }
1268 
1269   /// Decision that was taken during cost calculation for memory instruction.
1270   enum InstWidening {
1271     CM_Unknown,
1272     CM_Widen,         // For consecutive accesses with stride +1.
1273     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1274     CM_Interleave,
1275     CM_GatherScatter,
1276     CM_Scalarize
1277   };
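
  // For example (an illustrative sketch, independent of any target), for a
  // loop over i with VF = 4:
  //   a[i] = b[i];      // stride +1 access      -> CM_Widen
  //   a[i] = b[n - i];  // stride -1 access      -> CM_Widen_Reverse
  //   a[i] = b[c[i]];   // indexed (random) load -> CM_GatherScatter or
  //                     //                          CM_Scalarize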
1278 
1279   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1280   /// instruction \p I and vector width \p VF.
1281   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1282                            unsigned Cost) {
1283     assert(VF >= 2 && "Expected VF >=2");
1284     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1285   }
1286 
1287   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1288   /// interleaving group \p Grp and vector width \p VF.
1289   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1290                            InstWidening W, unsigned Cost) {
1291     assert(VF >= 2 && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1294     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1295       if (auto *I = Grp->getMember(i)) {
1296         if (Grp->getInsertPos() == I)
1297           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1298         else
1299           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1300       }
1301     }
1302   }
1303 
1304   /// Return the cost model decision for the given instruction \p I and vector
1305   /// width \p VF. Return CM_Unknown if this instruction did not pass
1306   /// through the cost modeling.
1307   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1308     assert(VF >= 2 && "Expected VF >=2");
1309     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1310     auto Itr = WideningDecisions.find(InstOnVF);
1311     if (Itr == WideningDecisions.end())
1312       return CM_Unknown;
1313     return Itr->second.first;
1314   }
1315 
1316   /// Return the vectorization cost for the given instruction \p I and vector
1317   /// width \p VF.
1318   unsigned getWideningCost(Instruction *I, unsigned VF) {
1319     assert(VF >= 2 && "Expected VF >=2");
1320     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1321     assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
1322     return WideningDecisions[InstOnVF].second;
1323   }
1324 
1325   /// Return True if instruction \p I is an optimizable truncate whose operand
1326   /// is an induction variable. Such a truncate will be removed by adding a new
1327   /// induction variable with the destination type.
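  /// For example (a sketch), given a 64-bit induction variable %i:
  ///   %t = trunc i64 %i to i32
  /// the truncate can be removed by creating a new i32 induction variable
  /// that produces the values of %t directly.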
1328   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1329     // If the instruction is not a truncate, return false.
1330     auto *Trunc = dyn_cast<TruncInst>(I);
1331     if (!Trunc)
1332       return false;
1333 
1334     // Get the source and destination types of the truncate.
1335     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1336     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1337 
1338     // If the truncate is free for the given types, return false. Replacing a
1339     // free truncate with an induction variable would add an induction variable
1340     // update instruction to each iteration of the loop. We exclude from this
1341     // check the primary induction variable since it will need an update
1342     // instruction regardless.
1343     Value *Op = Trunc->getOperand(0);
1344     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1345       return false;
1346 
1347     // If the truncated value is not an induction variable, return false.
1348     return Legal->isInductionPhi(Op);
1349   }
1350 
1351   /// Collects the instructions to scalarize for each predicated instruction in
1352   /// the loop.
1353   void collectInstsToScalarize(unsigned VF);
1354 
1355   /// Collect Uniform and Scalar values for the given \p VF.
1356   /// The sets depend on CM decision for Load/Store instructions
1357   /// that may be vectorized as interleave, gather-scatter or scalarized.
1358   void collectUniformsAndScalars(unsigned VF) {
1359     // Do the analysis once.
1360     if (VF == 1 || Uniforms.count(VF))
1361       return;
1362     setCostBasedWideningDecision(VF);
1363     collectLoopUniforms(VF);
1364     collectLoopScalars(VF);
1365   }
1366 
1367   /// Returns true if the target machine supports masked store operation
1368   /// for the given \p DataType and kind of access to \p Ptr.
1369   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1370     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
1371   }
1372 
1373   /// Returns true if the target machine supports masked load operation
1374   /// for the given \p DataType and kind of access to \p Ptr.
1375   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1376     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
1377   }
1378 
1379   /// Returns true if the target machine supports masked scatter operation
1380   /// for the given \p DataType.
1381   bool isLegalMaskedScatter(Type *DataType) {
1382     return TTI.isLegalMaskedScatter(DataType);
1383   }
1384 
1385   /// Returns true if the target machine supports masked gather operation
1386   /// for the given \p DataType.
1387   bool isLegalMaskedGather(Type *DataType) {
1388     return TTI.isLegalMaskedGather(DataType);
1389   }
1390 
1391   /// Returns true if the target machine can represent \p V as a masked gather
1392   /// or scatter operation.
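  /// For example, an indexed load such as a[b[i]] can only be widened as a
  /// gather, and an indexed store such as a[b[i]] = x as a scatter; whether
  /// either is legal depends on the target.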
1393   bool isLegalGatherOrScatter(Value *V) {
1394     bool LI = isa<LoadInst>(V);
1395     bool SI = isa<StoreInst>(V);
1396     if (!LI && !SI)
1397       return false;
1398     auto *Ty = getMemInstValueType(V);
1399     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1400   }
1401 
1402   /// Returns true if \p I is an instruction that will be scalarized with
1403   /// predication. Such instructions include conditional stores and
1404   /// instructions that may divide by zero.
1405   bool isScalarWithPredication(Instruction *I);
1406 
1407   /// Returns true if \p I is a memory instruction with consecutive memory
1408   /// access that can be widened.
1409   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1410 
1411   /// \brief Check if \p Instr belongs to any interleaved access group.
1412   bool isAccessInterleaved(Instruction *Instr) {
1413     return InterleaveInfo.isInterleaved(Instr);
1414   }
1415 
1416   /// \brief Get the interleaved access group that \p Instr belongs to.
1417   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1418     return InterleaveInfo.getInterleaveGroup(Instr);
1419   }
1420 
1421   /// \brief Returns true if an interleaved group requires a scalar iteration
1422   /// to handle accesses with gaps.
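  /// For example, a group accessing A[3*i] and A[3*i+1] but not A[3*i+2] has
  /// a gap; without a scalar epilogue, the last wide access of the group
  /// could read past the bounds of the original scalar loop.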
1423   bool requiresScalarEpilogue() const {
1424     return InterleaveInfo.requiresScalarEpilogue();
1425   }
1426 
1427 private:
1428   unsigned NumPredStores = 0;
1429 
1430   /// \return An upper bound for the vectorization factor, larger than zero.
1431   /// One is returned if vectorization should best be avoided due to cost.
1432   unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);
1433 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1441   using VectorizationCostTy = std::pair<unsigned, bool>;
1442 
1443   /// Returns the expected execution cost. The unit of the cost does
1444   /// not matter because we use the 'cost' units to compare different
1445   /// vector widths. The cost that is returned is *not* normalized by
1446   /// the factor width.
1447   VectorizationCostTy expectedCost(unsigned VF);
1448 
1449   /// Returns the execution time cost of an instruction for a given vector
1450   /// width. Vector width of one means scalar.
1451   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1452 
1453   /// The cost-computation logic from getInstructionCost which provides
1454   /// the vector type as an output parameter.
1455   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1456 
1457   /// Calculate vectorization cost of memory instruction \p I.
1458   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1459 
1460   /// The cost computation for scalarized memory instruction.
1461   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1462 
1463   /// The cost computation for interleaving group of memory instructions.
1464   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1465 
1466   /// The cost computation for Gather/Scatter instruction.
1467   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1468 
1469   /// The cost computation for widening instruction \p I with consecutive
1470   /// memory access.
1471   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1472 
1473   /// The cost calculation for Load instruction \p I with uniform pointer -
1474   /// scalar load + broadcast.
1475   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1476 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1479   bool isConsecutiveLoadOrStore(Instruction *I);
1480 
1481   /// Returns true if an artificially high cost for emulated masked memrefs
1482   /// should be used.
1483   bool useEmulatedMaskMemRefHack(Instruction *I);
1484 
1485   /// Create an analysis remark that explains why vectorization failed
1486   ///
1487   /// \p RemarkName is the identifier for the remark.  \return the remark object
1488   /// that can be streamed to.
1489   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
1490     return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1491                                   RemarkName, TheLoop);
1492   }
1493 
1494   /// Map of scalar integer values to the smallest bitwidth they can be legally
1495   /// represented as. The vector equivalents of these values should be truncated
1496   /// to this type.
1497   MapVector<Instruction *, uint64_t> MinBWs;
1498 
1499   /// A type representing the costs for instructions if they were to be
1500   /// scalarized rather than vectorized. The entries are Instruction-Cost
1501   /// pairs.
1502   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1503 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1506   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1507 
1508   /// A map holding scalar costs for different vectorization factors. The
1509   /// presence of a cost for an instruction in the mapping indicates that the
1510   /// instruction will be scalarized when vectorizing with the associated
1511   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1512   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1513 
1514   /// Holds the instructions known to be uniform after vectorization.
1515   /// The data is collected per VF.
1516   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1517 
1518   /// Holds the instructions known to be scalar after vectorization.
1519   /// The data is collected per VF.
1520   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1521 
1522   /// Holds the instructions (address computations) that are forced to be
1523   /// scalarized.
1524   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1525 
1526   /// Returns the expected difference in cost from scalarizing the expression
1527   /// feeding a predicated instruction \p PredInst. The instructions to
1528   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1529   /// non-negative return value implies the expression will be scalarized.
1530   /// Currently, only single-use chains are considered for scalarization.
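  /// For example, if a predicated store is scalarized anyway, an add feeding
  /// only that store may be cheaper to scalarize as well, since that avoids
  /// extracting a lane from a vector value for each predicated block.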
1531   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1532                               unsigned VF);
1533 
1534   /// Collect the instructions that are uniform after vectorization. An
1535   /// instruction is uniform if we represent it with a single scalar value in
1536   /// the vectorized loop corresponding to each vector iteration. Examples of
1537   /// uniform instructions include pointer operands of consecutive or
1538   /// interleaved memory accesses. Note that although uniformity implies an
1539   /// instruction will be scalar, the reverse is not true. In general, a
1540   /// scalarized instruction will be represented by VF scalar values in the
1541   /// vectorized loop, each corresponding to an iteration of the original
1542   /// scalar loop.
1543   void collectLoopUniforms(unsigned VF);
1544 
1545   /// Collect the instructions that are scalar after vectorization. An
1546   /// instruction is scalar if it is known to be uniform or will be scalarized
1547   /// during vectorization. Non-uniform scalarized instructions will be
1548   /// represented by VF values in the vectorized loop, each corresponding to an
1549   /// iteration of the original scalar loop.
1550   void collectLoopScalars(unsigned VF);
1551 
1552   /// Keeps cost model vectorization decision and cost for instructions.
1553   /// Right now it is used for memory instructions only.
1554   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1555                                 std::pair<InstWidening, unsigned>>;
1556 
1557   DecisionList WideningDecisions;
1558 
1559 public:
1560   /// The loop that we evaluate.
1561   Loop *TheLoop;
1562 
1563   /// Predicated scalar evolution analysis.
1564   PredicatedScalarEvolution &PSE;
1565 
1566   /// Loop Info analysis.
1567   LoopInfo *LI;
1568 
1569   /// Vectorization legality.
1570   LoopVectorizationLegality *Legal;
1571 
1572   /// Vector target information.
1573   const TargetTransformInfo &TTI;
1574 
1575   /// Target Library Info.
1576   const TargetLibraryInfo *TLI;
1577 
1578   /// Demanded bits analysis.
1579   DemandedBits *DB;
1580 
1581   /// Assumption cache.
1582   AssumptionCache *AC;
1583 
1584   /// Interface to emit optimization remarks.
1585   OptimizationRemarkEmitter *ORE;
1586 
1587   const Function *TheFunction;
1588 
1589   /// Loop Vectorize Hint.
1590   const LoopVectorizeHints *Hints;
1591 
1592   /// The interleave access information contains groups of interleaved accesses
1593   /// with the same stride and close to each other.
1594   InterleavedAccessInfo &InterleaveInfo;
1595 
1596   /// Values to ignore in the cost model.
1597   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1598 
1599   /// Values to ignore in the cost model when VF > 1.
1600   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1601 };
1602 
1603 } // end namespace llvm
1604 
// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
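//
// For example, the following loop nest is explicitly annotated; vectorization
// of the outer i-loop with width 4 is requested:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (i = 0; i < n; ++i)    // outer loop, explicit candidate
//     for (j = 0; j < m; ++j)  // inner loop
//       a[i][j] = b[j][i];
//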
// Please note that we are currently forced to abuse the pragma 'clang
1612 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1613 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1614 // provides *explicit vectorization hints* (LV can bypass legal checks and
1615 // assume that vectorization is legal). However, both hints are implemented
1616 // using the same metadata (llvm.loop.vectorize, processed by
1617 // LoopVectorizeHints). This will be fixed in the future when the native IR
1618 // representation for pragma 'omp simd' is introduced.
1619 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1620                                    OptimizationRemarkEmitter *ORE) {
1621   assert(!OuterLp->empty() && "This is not an outer loop");
1622   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1623 
1624   // Only outer loops with an explicit vectorization hint are supported.
1625   // Unannotated outer loops are ignored.
1626   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1627     return false;
1628 
1629   Function *Fn = OuterLp->getHeader()->getParent();
1630   if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) {
1631     DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1632     return false;
1633   }
1634 
1635   if (!Hints.getWidth()) {
1636     DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
1637     emitMissedWarning(Fn, OuterLp, Hints, ORE);
1638     return false;
1639   }
1640 
1641   if (Hints.getInterleave() > 1) {
1642     // TODO: Interleave support is future work.
1643     DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1644                     "outer loops.\n");
1645     emitMissedWarning(Fn, OuterLp, Hints, ORE);
1646     return false;
1647   }
1648 
1649   return true;
1650 }
1651 
1652 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1653                                   OptimizationRemarkEmitter *ORE,
1654                                   SmallVectorImpl<Loop *> &V) {
1655   // Collect inner loops and outer loops without irreducible control flow. For
1656   // now, only collect outer loops that have explicit vectorization hints.
1657   if (L.empty() || (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1658     LoopBlocksRPO RPOT(&L);
1659     RPOT.perform(LI);
1660     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1661       V.push_back(&L);
1662       // TODO: Collect inner loops inside marked outer loops in case
1663       // vectorization fails for the outer loop. Do not invoke
1664       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1665       // already known to be reducible. We can use an inherited attribute for
1666       // that.
1667       return;
1668     }
1669   }
1670   for (Loop *InnerL : L)
1671     collectSupportedLoops(*InnerL, LI, ORE, V);
1672 }
1673 
1674 namespace {
1675 
1676 /// The LoopVectorize Pass.
1677 struct LoopVectorize : public FunctionPass {
1678   /// Pass identification, replacement for typeid
1679   static char ID;
1680 
1681   LoopVectorizePass Impl;
1682 
1683   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
1684       : FunctionPass(ID) {
1685     Impl.DisableUnrolling = NoUnrolling;
1686     Impl.AlwaysVectorize = AlwaysVectorize;
1687     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1688   }
1689 
1690   bool runOnFunction(Function &F) override {
1691     if (skipFunction(F))
1692       return false;
1693 
1694     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1695     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1696     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1697     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1698     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1699     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1700     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
1701     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1702     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1703     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1704     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1705     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1706 
1707     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1708         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1709 
1710     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1711                         GetLAA, *ORE);
1712   }
1713 
1714   void getAnalysisUsage(AnalysisUsage &AU) const override {
1715     AU.addRequired<AssumptionCacheTracker>();
1716     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1717     AU.addRequired<DominatorTreeWrapperPass>();
1718     AU.addRequired<LoopInfoWrapperPass>();
1719     AU.addRequired<ScalarEvolutionWrapperPass>();
1720     AU.addRequired<TargetTransformInfoWrapperPass>();
1721     AU.addRequired<AAResultsWrapperPass>();
1722     AU.addRequired<LoopAccessLegacyAnalysis>();
1723     AU.addRequired<DemandedBitsWrapperPass>();
1724     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1725     AU.addPreserved<LoopInfoWrapperPass>();
1726     AU.addPreserved<DominatorTreeWrapperPass>();
1727     AU.addPreserved<BasicAAWrapperPass>();
1728     AU.addPreserved<GlobalsAAWrapperPass>();
1729   }
1730 };
1731 
1732 } // end anonymous namespace
1733 
1734 //===----------------------------------------------------------------------===//
1735 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1736 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1737 //===----------------------------------------------------------------------===//
1738 
1739 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1740   // We need to place the broadcast of invariant variables outside the loop.
1741   Instruction *Instr = dyn_cast<Instruction>(V);
1742   bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
1743   bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
1744 
1745   // Place the code for broadcasting invariant variables in the new preheader.
1746   IRBuilder<>::InsertPointGuard Guard(Builder);
1747   if (Invariant)
1748     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1749 
1750   // Broadcast the scalar into all locations in the vector.
1751   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1752 
1753   return Shuf;
1754 }
1755 
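// Create a vector phi for an integer or floating-point induction variable
// and its per-part updates. For example, for VF = 4, UF = 1, start 0 and
// step 1, a sketch of the generated IR is:
//   vector.body:
//     %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
//                              [ %vec.ind.next, %vector.body ]
//     ...
//     %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>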
1756 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1757     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1758   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1759          "Expected either an induction phi-node or a truncate of it!");
1760   Value *Start = II.getStartValue();
1761 
1762   // Construct the initial value of the vector IV in the vector loop preheader
1763   auto CurrIP = Builder.saveIP();
1764   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1765   if (isa<TruncInst>(EntryVal)) {
1766     assert(Start->getType()->isIntegerTy() &&
1767            "Truncation requires an integer type");
1768     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1769     Step = Builder.CreateTrunc(Step, TruncType);
1770     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1771   }
1772   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1773   Value *SteppedStart =
1774       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1775 
1776   // We create vector phi nodes for both integer and floating-point induction
1777   // variables. Here, we determine the kind of arithmetic we will perform.
1778   Instruction::BinaryOps AddOp;
1779   Instruction::BinaryOps MulOp;
1780   if (Step->getType()->isIntegerTy()) {
1781     AddOp = Instruction::Add;
1782     MulOp = Instruction::Mul;
1783   } else {
1784     AddOp = II.getInductionOpcode();
1785     MulOp = Instruction::FMul;
1786   }
1787 
1788   // Multiply the vectorization factor by the step using integer or
1789   // floating-point arithmetic as appropriate.
1790   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1791   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1792 
1793   // Create a vector splat to use in the induction update.
1794   //
1795   // FIXME: If the step is non-constant, we create the vector splat with
1796   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1797   //        handle a constant vector splat.
1798   Value *SplatVF = isa<Constant>(Mul)
1799                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
1800                        : Builder.CreateVectorSplat(VF, Mul);
1801   Builder.restoreIP(CurrIP);
1802 
1803   // We may need to add the step a number of times, depending on the unroll
1804   // factor. The last of those goes into the PHI.
1805   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1806                                     &*LoopVectorBody->getFirstInsertionPt());
1807   Instruction *LastInduction = VecInd;
1808   for (unsigned Part = 0; Part < UF; ++Part) {
1809     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1810 
1811     if (isa<TruncInst>(EntryVal))
1812       addMetadata(LastInduction, EntryVal);
1813     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1814 
1815     LastInduction = cast<Instruction>(addFastMathFlag(
1816         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1817   }
1818 
1819   // Move the last step to the end of the latch block. This ensures consistent
1820   // placement of all induction updates.
1821   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1822   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1823   auto *ICmp = cast<Instruction>(Br->getCondition());
1824   LastInduction->moveBefore(ICmp);
1825   LastInduction->setName("vec.ind.next");
1826 
1827   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1828   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1829 }
1830 
1831 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1832   return Cost->isScalarAfterVectorization(I, VF) ||
1833          Cost->isProfitableToScalarize(I, VF);
1834 }
1835 
1836 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1837   if (shouldScalarizeInstruction(IV))
1838     return true;
1839   auto isScalarInst = [&](User *U) -> bool {
1840     auto *I = cast<Instruction>(U);
1841     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1842   };
1843   return llvm::any_of(IV->users(), isScalarInst);
1844 }
1845 
1846 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1847     const InductionDescriptor &ID, const Instruction *EntryVal,
1848     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1849   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1850          "Expected either an induction phi-node or a truncate of it!");
1851 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted phi is equal to the
  // uncasted phi in the vectorized loop (possibly under a runtime guard). It
  // reuses the same InductionDescriptor as the original IV, but we don't have
  // to do any recording in this case - that is done when the original IV is
  // processed.
1858   if (isa<TruncInst>(EntryVal))
1859     return;
1860 
1861   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1862   if (Casts.empty())
1863     return;
1864   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
1866   // induction update chain itself.
1867   Instruction *CastInst = *Casts.begin();
1868   if (Lane < UINT_MAX)
1869     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1870   else
1871     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1872 }
1873 
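// Widen the integer or floating-point induction variable \p IV. For example
// (a sketch), for VF = 4 an integer IV i with step 1 may become a vector IV
// <i, i+1, i+2, i+3>, the VF scalar steps i, i+1, i+2, i+3, or both,
// depending on how the IV's users are vectorized.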
1874 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1875   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1876          "Primary induction variable must have an integer type");
1877 
1878   auto II = Legal->getInductionVars()->find(IV);
1879   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
1880 
1881   auto ID = II->second;
1882   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1883 
1884   // The scalar value to broadcast. This will be derived from the canonical
1885   // induction variable.
1886   Value *ScalarIV = nullptr;
1887 
1888   // The value from the original loop to which we are mapping the new induction
1889   // variable.
1890   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1891 
1892   // True if we have vectorized the induction variable.
1893   auto VectorizedIV = false;
1894 
1895   // Determine if we want a scalar version of the induction variable. This is
1896   // true if the induction variable itself is not widened, or if it has at
1897   // least one user in the loop that is not widened.
1898   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
1899 
1900   // Generate code for the induction step. Note that induction steps are
1901   // required to be loop-invariant
1902   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
1903          "Induction step should be loop invariant");
1904   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1905   Value *Step = nullptr;
1906   if (PSE.getSE()->isSCEVable(IV->getType())) {
1907     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1908     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
1909                              LoopVectorPreHeader->getTerminator());
1910   } else {
1911     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
1912   }
1913 
1914   // Try to create a new independent vector induction variable. If we can't
1915   // create the phi node, we will splat the scalar induction variable in each
1916   // loop iteration.
1917   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
1918     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1919     VectorizedIV = true;
1920   }
1921 
1922   // If we haven't yet vectorized the induction variable, or if we will create
1923   // a scalar one, we need to define the scalar induction variable and step
1924   // values. If we were given a truncation type, truncate the canonical
1925   // induction variable and step. Otherwise, derive these values from the
1926   // induction descriptor.
1927   if (!VectorizedIV || NeedsScalarIV) {
1928     ScalarIV = Induction;
1929     if (IV != OldInduction) {
1930       ScalarIV = IV->getType()->isIntegerTy()
1931                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1932                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1933                                           IV->getType());
1934       ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
1935       ScalarIV->setName("offset.idx");
1936     }
1937     if (Trunc) {
1938       auto *TruncType = cast<IntegerType>(Trunc->getType());
1939       assert(Step->getType()->isIntegerTy() &&
1940              "Truncation requires an integer step");
1941       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1942       Step = Builder.CreateTrunc(Step, TruncType);
1943     }
1944   }
1945 
1946   // If we haven't yet vectorized the induction variable, splat the scalar
1947   // induction variable, and build the necessary step vectors.
1948   // TODO: Don't do it unless the vectorized IV is really required.
1949   if (!VectorizedIV) {
1950     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1951     for (unsigned Part = 0; Part < UF; ++Part) {
1952       Value *EntryPart =
1953           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1954       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1955       if (Trunc)
1956         addMetadata(EntryPart, Trunc);
1957       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1958     }
1959   }
1960 
1961   // If an induction variable is only used for counting loop iterations or
1962   // calculating addresses, it doesn't need to be widened. Create scalar steps
1963   // that can be used by instructions we will later scalarize. Note that the
1964   // addition of the scalar steps will not increase the number of instructions
1965   // in the loop in the common case prior to InstCombine. We will be trading
1966   // one vector extract for each scalar step.
1967   if (NeedsScalarIV)
1968     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1969 }
1970 
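// For example (a sketch), for VF = 4, StartIdx = 2 and an integer Step of 1,
// this returns Val + <2, 3, 4, 5>; in general, lane i of the result holds
// Val[i] + (StartIdx + i) * Step.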
1971 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1972                                           Instruction::BinaryOps BinOp) {
1973   // Create and check the types.
1974   assert(Val->getType()->isVectorTy() && "Must be a vector");
1975   int VLen = Val->getType()->getVectorNumElements();
1976 
1977   Type *STy = Val->getType()->getScalarType();
1978   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1979          "Induction Step must be an integer or FP");
1980   assert(Step->getType() == STy && "Step has wrong type");
1981 
1982   SmallVector<Constant *, 8> Indices;
1983 
1984   if (STy->isIntegerTy()) {
1985     // Create a vector of consecutive numbers from zero to VF.
1986     for (int i = 0; i < VLen; ++i)
1987       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1988 
1989     // Add the consecutive indices to the vector value.
1990     Constant *Cv = ConstantVector::get(Indices);
1991     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1992     Step = Builder.CreateVectorSplat(VLen, Step);
1993     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    //        flags, which can be found from the original scalar operations.
1996     Step = Builder.CreateMul(Cv, Step);
1997     return Builder.CreateAdd(Val, Step, "induction");
1998   }
1999 
2000   // Floating point induction.
2001   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2002          "Binary Opcode should be specified for FP induction");
2003   // Create a vector of consecutive numbers from zero to VF.
2004   for (int i = 0; i < VLen; ++i)
2005     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2006 
2007   // Add the consecutive indices to the vector value.
2008   Constant *Cv = ConstantVector::get(Indices);
2009 
2010   Step = Builder.CreateVectorSplat(VLen, Step);
2011 
2012   // Floating point operations had to be 'fast' to enable the induction.
2013   FastMathFlags Flags;
2014   Flags.setFast();
2015 
2016   Value *MulOp = Builder.CreateFMul(Cv, Step);
2017   if (isa<Instruction>(MulOp))
    // Have to check; MulOp may be a constant.
2019     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2020 
2021   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2022   if (isa<Instruction>(BOp))
2023     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2024   return BOp;
2025 }
2026 
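// Build the scalar steps of an induction variable. For example (a sketch),
// for VF = 4, UF = 2 and a scalar IV with step S, this emits the eight
// scalar values ScalarIV + {0, 1, ..., 7} * S, one per unroll part and lane
// (lane 0 only, if EntryVal is uniform after vectorization).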
2027 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2028                                            Instruction *EntryVal,
2029                                            const InductionDescriptor &ID) {
2030   // We shouldn't have to build scalar steps if we aren't vectorizing.
2031   assert(VF > 1 && "VF should be greater than one");
2032 
2033   // Get the value type and ensure it and the step have the same integer type.
2034   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2035   assert(ScalarIVTy == Step->getType() &&
2036          "Val and Step should have the same type");
2037 
2038   // We build scalar steps for both integer and floating-point induction
2039   // variables. Here, we determine the kind of arithmetic we will perform.
2040   Instruction::BinaryOps AddOp;
2041   Instruction::BinaryOps MulOp;
2042   if (ScalarIVTy->isIntegerTy()) {
2043     AddOp = Instruction::Add;
2044     MulOp = Instruction::Mul;
2045   } else {
2046     AddOp = ID.getInductionOpcode();
2047     MulOp = Instruction::FMul;
2048   }
2049 
2050   // Determine the number of scalars we need to generate for each unroll
2051   // iteration. If EntryVal is uniform, we only need to generate the first
2052   // lane. Otherwise, we generate all VF values.
2053   unsigned Lanes =
2054       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2055                                                                          : VF;
2056   // Compute the scalar steps and save the results in VectorLoopValueMap.
2057   for (unsigned Part = 0; Part < UF; ++Part) {
2058     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2059       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2060       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2061       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2062       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2063       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2064     }
2065   }
2066 }
2067 
2068 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2069   assert(V != Induction && "The new induction variable should not be used.");
2070   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2071   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2072 
2073   // If we have a stride that is replaced by one, do it here.
2074   if (Legal->hasStride(V))
2075     V = ConstantInt::get(V->getType(), 1);
2076 
2077   // If we have a vector mapped to this value, return it.
2078   if (VectorLoopValueMap.hasVectorValue(V, Part))
2079     return VectorLoopValueMap.getVectorValue(V, Part);
2080 
2081   // If the value has not been vectorized, check if it has been scalarized
2082   // instead. If it has been scalarized, and we actually need the value in
2083   // vector form, we will construct the vector values on demand.
2084   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2085     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2086 
2087     // If we've scalarized a value, that value should be an instruction.
2088     auto *I = cast<Instruction>(V);
2089 
2090     // If we aren't vectorizing, we can just copy the scalar map values over to
2091     // the vector map.
2092     if (VF == 1) {
2093       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2094       return ScalarValue;
2095     }
2096 
2097     // Get the last scalar instruction we generated for V and Part. If the value
2098     // is known to be uniform after vectorization, this corresponds to lane zero
2099     // of the Part unroll iteration. Otherwise, the last instruction is the one
2100     // we created for the last vector lane of the Part unroll iteration.
2101     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2102     auto *LastInst = cast<Instruction>(
2103         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2104 
2105     // Set the insert point after the last scalarized instruction. This ensures
2106     // the insertelement sequence will directly follow the scalar definitions.
2107     auto OldIP = Builder.saveIP();
2108     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2109     Builder.SetInsertPoint(&*NewIP);
2110 
2111     // However, if we are vectorizing, we need to construct the vector values.
2112     // If the value is known to be uniform after vectorization, we can just
2113     // broadcast the scalar value corresponding to lane zero for each unroll
2114     // iteration. Otherwise, we construct the vector values using insertelement
2115     // instructions. Since the resulting vectors are stored in
2116     // VectorLoopValueMap, we will only generate the insertelements once.
2117     Value *VectorValue = nullptr;
2118     if (Cost->isUniformAfterVectorization(I, VF)) {
2119       VectorValue = getBroadcastInstrs(ScalarValue);
2120       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2121     } else {
2122       // Initialize packing with insertelements to start from undef.
2123       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2124       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2125       for (unsigned Lane = 0; Lane < VF; ++Lane)
2126         packScalarIntoVectorValue(V, {Part, Lane});
2127       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2128     }
2129     Builder.restoreIP(OldIP);
2130     return VectorValue;
2131   }
2132 
2133   // If this scalar is unknown, assume that it is a constant or that it is
2134   // loop invariant. Broadcast V and save the value for future uses.
2135   Value *B = getBroadcastInstrs(V);
2136   VectorLoopValueMap.setVectorValue(V, Part, B);
2137   return B;
2138 }
2139 
2140 Value *
2141 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2142                                             const VPIteration &Instance) {
2143   // If the value is not an instruction contained in the loop, it should
2144   // already be scalar.
2145   if (OrigLoop->isLoopInvariant(V))
2146     return V;
2147 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2151 
2152   // If the value from the original loop has not been vectorized, it is
2153   // represented by UF x VF scalar values in the new loop. Return the requested
2154   // scalar value.
2155   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2156     return VectorLoopValueMap.getScalarValue(V, Instance);
2157 
2158   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2159   // for the given unroll part. If this entry is not a vector type (i.e., the
2160   // vectorization factor is one), there is no need to generate an
2161   // extractelement instruction.
2162   auto *U = getOrCreateVectorValue(V, Instance.Part);
2163   if (!U->getType()->isVectorTy()) {
2164     assert(VF == 1 && "Value not scalarized has non-vector type");
2165     return U;
2166   }
2167 
2168   // Otherwise, the value from the original loop has been vectorized and is
2169   // represented by UF vector values. Extract and return the requested scalar
2170   // value from the appropriate vector lane.
2171   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2172 }
2173 
2174 void InnerLoopVectorizer::packScalarIntoVectorValue(
2175     Value *V, const VPIteration &Instance) {
2176   assert(V != Induction && "The new induction variable should not be used.");
2177   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2178   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2179 
2180   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2181   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2182   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2183                                             Builder.getInt32(Instance.Lane));
2184   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2185 }
2186 
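// Reverse the lanes of a vector. For example, for VF = 4 this shuffles
// <a, b, c, d> into <d, c, b, a>.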
2187 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2188   assert(Vec->getType()->isVectorTy() && "Invalid type");
2189   SmallVector<Constant *, 8> ShuffleMask;
2190   for (unsigned i = 0; i < VF; ++i)
2191     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2192 
2193   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2194                                      ConstantVector::get(ShuffleMask),
2195                                      "reverse");
2196 }
2197 
2198 // Try to vectorize the interleave group that \p Instr belongs to.
2199 //
2200 // E.g. Translate following interleaved load group (factor = 3):
2201 //   for (i = 0; i < N; i+=3) {
2202 //     R = Pic[i];             // Member of index 0
2203 //     G = Pic[i+1];           // Member of index 1
2204 //     B = Pic[i+2];           // Member of index 2
2205 //     ... // do something to R, G, B
2206 //   }
2207 // To:
2208 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2209 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2210 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2211 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2212 //
2213 // Or translate following interleaved store group (factor = 3):
2214 //   for (i = 0; i < N; i+=3) {
2215 //     ... do something to R, G, B
2216 //     Pic[i]   = R;           // Member of index 0
2217 //     Pic[i+1] = G;           // Member of index 1
2218 //     Pic[i+2] = B;           // Member of index 2
2219 //   }
2220 // To:
2221 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2222 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2223 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2224 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2225 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2226 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2227   const InterleaveGroup *Group = Cost->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");
2229 
2230   // Skip if current instruction is not the insert position.
2231   if (Instr != Group->getInsertPos())
2232     return;
2233 
2234   const DataLayout &DL = Instr->getModule()->getDataLayout();
2235   Value *Ptr = getLoadStorePointerOperand(Instr);
2236 
2237   // Prepare for the vector type of the interleaved load/store.
2238   Type *ScalarTy = getMemInstValueType(Instr);
2239   unsigned InterleaveFactor = Group->getFactor();
2240   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2241   Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2242 
2243   // Prepare for the new pointers.
2244   setDebugLocFromInst(Builder, Ptr);
2245   SmallVector<Value *, 2> NewPtrs;
2246   unsigned Index = Group->getIndex(Instr);
2247 
2248   // If the group is reverse, adjust the index to refer to the last vector lane
2249   // instead of the first. We adjust the index from the first vector lane,
2250   // rather than directly getting the pointer for lane VF - 1, because the
2251   // pointer operand of the interleaved access is supposed to be uniform. For
2252   // uniform instructions, we're only required to generate a value for the
2253   // first vector lane in each unroll iteration.
2254   if (Group->isReverse())
2255     Index += (VF - 1) * Group->getFactor();
2256 
2257   for (unsigned Part = 0; Part < UF; Part++) {
2258     Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2259 
    // Note that the current instruction could be at any index. We need to
    // adjust the address to the member of index 0.
2262     //
2263     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2264     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2266     //
2267     // E.g.  A[i+1] = a;     // Member of index 1
2268     //       A[i]   = b;     // Member of index 0
2269     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2271     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2272 
2273     // Cast to the vector pointer type.
2274     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2275   }
2276 
2277   setDebugLocFromInst(Builder, Instr);
2278   Value *UndefVec = UndefValue::get(VecTy);
2279 
2280   // Vectorize the interleaved load group.
2281   if (isa<LoadInst>(Instr)) {
2282     // For each unroll part, create a wide load for the group.
2283     SmallVector<Value *, 2> NewLoads;
2284     for (unsigned Part = 0; Part < UF; Part++) {
2285       auto *NewLoad = Builder.CreateAlignedLoad(
2286           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2287       Group->addMetadata(NewLoad);
2288       NewLoads.push_back(NewLoad);
2289     }
2290 
2291     // For each member in the group, shuffle out the appropriate data from the
2292     // wide loads.
2293     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2294       Instruction *Member = Group->getMember(I);
2295 
2296       // Skip the gaps in the group.
2297       if (!Member)
2298         continue;
2299 
2300       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2301       for (unsigned Part = 0; Part < UF; Part++) {
2302         Value *StridedVec = Builder.CreateShuffleVector(
2303             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2304 
        // If this member has a different type, cast the result to that type.
2306         if (Member->getType() != ScalarTy) {
2307           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2308           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2309         }
2310 
2311         if (Group->isReverse())
2312           StridedVec = reverseVector(StridedVec);
2313 
2314         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2315       }
2316     }
2317     return;
2318   }
2319 
  // The sub-vector type for the current instruction.
2321   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2322 
2323   // Vectorize the interleaved store group.
2324   for (unsigned Part = 0; Part < UF; Part++) {
2325     // Collect the stored vector from each member.
2326     SmallVector<Value *, 4> StoredVecs;
2327     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
2329       Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");
2331 
2332       Value *StoredVec = getOrCreateVectorValue(
2333           cast<StoreInst>(Member)->getValueOperand(), Part);
2334       if (Group->isReverse())
2335         StoredVec = reverseVector(StoredVec);
2336 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2340         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2341 
2342       StoredVecs.push_back(StoredVec);
2343     }
2344 
2345     // Concatenate all vectors into a wide vector.
2346     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2347 
2348     // Interleave the elements in the wide vector.
2349     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2350     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2351                                               "interleaved.vec");
2352 
2353     Instruction *NewStoreInstr =
2354         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2355 
2356     Group->addMetadata(NewStoreInstr);
2357   }
2358 }
2359 
2360 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2361                                                      VectorParts *BlockInMask) {
2362   // Attempt to issue a wide load.
2363   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2364   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2365 
2366   assert((LI || SI) && "Invalid Load/Store instruction");
2367 
2368   LoopVectorizationCostModel::InstWidening Decision =
2369       Cost->getWideningDecision(Instr, VF);
2370   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2371          "CM decision should be taken at this point");
2372   if (Decision == LoopVectorizationCostModel::CM_Interleave)
2373     return vectorizeInterleaveGroup(Instr);
2374 
2375   Type *ScalarDataTy = getMemInstValueType(Instr);
2376   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2377   Value *Ptr = getLoadStorePointerOperand(Instr);
2378   unsigned Alignment = getMemInstAlignment(Instr);
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
2381   const DataLayout &DL = Instr->getModule()->getDataLayout();
2382   if (!Alignment)
2383     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2384   unsigned AddressSpace = getMemInstAddressSpace(Instr);
2385 
2386   // Determine if the pointer operand of the access is either consecutive or
2387   // reverse consecutive.
2388   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2389   bool ConsecutiveStride =
2390       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2391   bool CreateGatherScatter =
2392       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2393 
2394   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2395   // gather/scatter. Otherwise Decision should have been to Scalarize.
2396   assert((ConsecutiveStride || CreateGatherScatter) &&
2397          "The instruction should be scalarized");
2398 
2399   // Handle consecutive loads/stores.
2400   if (ConsecutiveStride)
2401     Ptr = getOrCreateScalarValue(Ptr, {0, 0});
2402 
2403   VectorParts Mask;
2404   bool isMaskRequired = BlockInMask;
2405   if (isMaskRequired)
2406     Mask = *BlockInMask;
2407 
2408   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2409     // Calculate the pointer for the specific unroll-part.
2410     Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
2411 
2412     if (Reverse) {
2413       // If the address is consecutive but reversed, then the
2414       // wide store needs to start at the last vector element.
2415       PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
2416       PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
2417       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2418         Mask[Part] = reverseVector(Mask[Part]);
2419     }
2420 
2421     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2422   };
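
  // For example (a sketch), for VF = 4 and unroll part 0, a reverse access
  // computes a pointer to Ptr[-3] so that the wide operation covers
  // Ptr[-3..0]; the value is then lane-reversed so that lane 0 still
  // corresponds to the first scalar iteration of the part.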
2423 
2424   // Handle Stores:
2425   if (SI) {
2426     setDebugLocFromInst(Builder, SI);
2427 
2428     for (unsigned Part = 0; Part < UF; ++Part) {
2429       Instruction *NewSI = nullptr;
2430       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
2431       if (CreateGatherScatter) {
2432         Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2433         Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2434         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2435                                             MaskPart);
2436       } else {
2437         if (Reverse) {
2438           // If we store to reverse consecutive memory locations, then we need
2439           // to reverse the order of elements in the stored value.
2440           StoredVal = reverseVector(StoredVal);
2441           // We don't want to update the value in the map as it might be used in
2442           // another expression. So don't call resetVectorValue(StoredVal).
2443         }
2444         auto *VecPtr = CreateVecPtr(Part, Ptr);
2445         if (isMaskRequired)
2446           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2447                                             Mask[Part]);
2448         else
2449           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2450       }
2451       addMetadata(NewSI, SI);
2452     }
2453     return;
2454   }
2455 
2456   // Handle loads.
2457   assert(LI && "Must have a load instruction");
2458   setDebugLocFromInst(Builder, LI);
2459   for (unsigned Part = 0; Part < UF; ++Part) {
2460     Value *NewLI;
2461     if (CreateGatherScatter) {
2462       Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2463       Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2464       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2465                                          nullptr, "wide.masked.gather");
2466       addMetadata(NewLI, LI);
2467     } else {
2468       auto *VecPtr = CreateVecPtr(Part, Ptr);
2469       if (isMaskRequired)
2470         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
2471                                          UndefValue::get(DataTy),
2472                                          "wide.masked.load");
2473       else
2474         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
2475 
2476       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2477       addMetadata(NewLI, LI);
2478       if (Reverse)
2479         NewLI = reverseVector(NewLI);
2480     }
2481     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2482   }
2483 }
2484 
2485 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2486                                                const VPIteration &Instance,
2487                                                bool IfPredicateInstr) {
2488   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2489 
2490   setDebugLocFromInst(Builder, Instr);
2491 
  // Does this instruction return a value?
2493   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2494 
2495   Instruction *Cloned = Instr->clone();
2496   if (!IsVoidRetTy)
2497     Cloned->setName(Instr->getName() + ".cloned");
2498 
2499   // Replace the operands of the cloned instructions with their scalar
2500   // equivalents in the new loop.
2501   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2502     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2503     Cloned->setOperand(op, NewOp);
2504   }
2505   addNewMetadata(Cloned, Instr);
2506 
2507   // Place the cloned scalar in the new loop.
2508   Builder.Insert(Cloned);
2509 
2510   // Add the cloned scalar to the scalar map entry.
2511   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2512 
  // If we just cloned a new assumption, add it to the assumption cache.
2514   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2515     if (II->getIntrinsicID() == Intrinsic::assume)
2516       AC->registerAssumption(II);
2517 
2518   // End if-block.
2519   if (IfPredicateInstr)
2520     PredicatedInstructions.push_back(Cloned);
2521 }
2522 
2523 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2524                                                       Value *End, Value *Step,
2525                                                       Instruction *DL) {
2526   BasicBlock *Header = L->getHeader();
2527   BasicBlock *Latch = L->getLoopLatch();
2528   // As we're just creating this loop, it's possible no latch exists
2529   // yet. If so, use the header as this will be a single block loop.
2530   if (!Latch)
2531     Latch = Header;
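
  // A shorthand sketch of the IR this function emits (value names are
  // illustrative):
  //
  //   header:
  //     %index = phi [ Start, preheader ], [ %index.next, latch ]
  //     ...
  //   latch:
  //     %index.next = add %index, Step
  //     %cmp = icmp eq %index.next, End
  //     br %cmp, exit, header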
2532 
2533   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2534   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2535   setDebugLocFromInst(Builder, OldInst);
2536   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2537 
2538   Builder.SetInsertPoint(Latch->getTerminator());
2539   setDebugLocFromInst(Builder, OldInst);
2540 
2541   // Create i+1 and fill the PHINode.
2542   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2543   Induction->addIncoming(Start, L->getLoopPreheader());
2544   Induction->addIncoming(Next, Latch);
2545   // Create the compare.
2546   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2547   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2548 
2549   // Now we have two terminators. Remove the old one from the block.
2550   Latch->getTerminator()->eraseFromParent();
2551 
2552   return Induction;
2553 }
2554 
2555 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2556   if (TripCount)
2557     return TripCount;
2558 
2559   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2560   // Find the loop boundaries.
2561   ScalarEvolution *SE = PSE.getSE();
2562   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2563   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2564          "Invalid loop count");
2565 
2566   Type *IdxTy = Legal->getWidestInductionType();
2567 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count in that case is when
  // the induction variable is signed and hence cannot overflow, so the
  // truncation is legal.
2573   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2574       IdxTy->getPrimitiveSizeInBits())
2575     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2576   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2577 
2578   // Get the total trip count from the count by adding 1.
2579   const SCEV *ExitCount = SE->getAddExpr(
2580       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
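
  // For example (illustrative): for a loop such as 'for (i = 0; i < n; ++i)',
  // the backedge-taken count is n - 1, so the trip count N is n.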
2581 
2582   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2583 
2584   // Expand the trip count and place the new instructions in the preheader.
2585   // Notice that the pre-header does not change, only the loop body.
2586   SCEVExpander Exp(*SE, DL, "induction");
2587 
2588   // Count holds the overall loop count (N).
2589   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2590                                 L->getLoopPreheader()->getTerminator());
2591 
2592   if (TripCount->getType()->isPointerTy())
2593     TripCount =
2594         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2595                                     L->getLoopPreheader()->getTerminator());
2596 
2597   return TripCount;
2598 }
2599 
2600 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2601   if (VectorTripCount)
2602     return VectorTripCount;
2603 
2604   Value *TC = getOrCreateTripCount(L);
2605   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2606 
2607   // Now we need to generate the expression for the part of the loop that the
2608   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2609   // iterations are not required for correctness, or N - Step, otherwise. Step
2610   // is equal to the vectorization factor (number of SIMD elements) times the
2611   // unroll factor (number of SIMD instructions).
2612   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2613   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2614 
2615   // If there is a non-reversed interleaved group that may speculatively access
2616   // memory out-of-bounds, we need to ensure that there will be at least one
2617   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2618   // the trip count, we set the remainder to be equal to the step. If the step
2619   // does not evenly divide the trip count, no adjustment is necessary since
2620   // there will already be scalar iterations. Note that the minimum iterations
2621   // check ensures that N >= Step.
2622   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2623     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2624     R = Builder.CreateSelect(IsZero, Step, R);
2625   }
2626 
2627   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
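
  // For example (illustrative): with a trip count of 10 and VF * UF = 4, the
  // remainder is 2 and n.vec = 8. If a scalar epilogue is required and the
  // trip count is 12, the remainder of 0 is bumped to 4, so n.vec = 8 and
  // four iterations are left for the scalar loop.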
2628 
2629   return VectorTripCount;
2630 }
2631 
2632 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2633                                                    const DataLayout &DL) {
2634   // Verify that V is a vector type with same number of elements as DstVTy.
2635   unsigned VF = DstVTy->getNumElements();
2636   VectorType *SrcVecTy = cast<VectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2638   Type *SrcElemTy = SrcVecTy->getElementType();
2639   Type *DstElemTy = DstVTy->getElementType();
2640   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2641          "Vector elements must have same size");
2642 
2643   // Do a direct cast if element types are castable.
2644   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2645     return Builder.CreateBitOrPointerCast(V, DstVTy);
2646   }
2647   // V cannot be directly casted to desired vector type.
2648   // May happen when V is a floating point vector but DstVTy is a vector of
2649   // pointers or vice-versa. Handle this using a two-step bitcast using an
2650   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2651   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2652          "Only one type should be a pointer type");
2653   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2654          "Only one type should be a floating point type");
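
  // For example (an illustrative case, assuming 64-bit pointers): casting
  // <2 x double> to <2 x i8*> goes through <2 x i64>, since a direct bitcast
  // between floating-point and pointer vectors is not possible.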
2655   Type *IntTy =
2656       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2657   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2658   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2659   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2660 }
2661 
2662 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2663                                                          BasicBlock *Bypass) {
2664   Value *Count = getOrCreateTripCount(L);
2665   BasicBlock *BB = L->getLoopPreheader();
2666   IRBuilder<> Builder(BB->getTerminator());
2667 
2668   // Generate code to check if the loop's trip count is less than VF * UF, or
2669   // equal to it in case a scalar epilogue is required; this implies that the
2670   // vector trip count is zero. This check also covers the case where adding one
2671   // to the backedge-taken count overflowed leading to an incorrect trip count
2672   // of zero. In this case we will also jump to the scalar loop.
2673   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2674                                           : ICmpInst::ICMP_ULT;
2675   Value *CheckMinIters = Builder.CreateICmp(
2676       P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
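
  // For example (illustrative): with VF = 4 and UF = 2, we branch to the
  // scalar loop when the trip count is less than 8, or less than or equal to
  // 8 when a scalar epilogue is required.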
2677 
2678   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2679   // Update dominator tree immediately if the generated block is a
2680   // LoopBypassBlock because SCEV expansions to generate loop bypass
2681   // checks may query it before the current function is finished.
2682   DT->addNewBlock(NewBB, BB);
2683   if (L->getParentLoop())
2684     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2685   ReplaceInstWithInst(BB->getTerminator(),
2686                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
2687   LoopBypassBlocks.push_back(BB);
2688 }
2689 
2690 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2691   BasicBlock *BB = L->getLoopPreheader();
2692 
  // Generate the code to check the SCEV assumptions that we made.
2694   // We want the new basic block to start at the first instruction in a
2695   // sequence of instructions that form a check.
2696   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2697                    "scev.check");
2698   Value *SCEVCheck =
2699       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2700 
2701   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2702     if (C->isZero())
2703       return;
2704 
2705   // Create a new block containing the stride check.
2706   BB->setName("vector.scevcheck");
2707   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2708   // Update dominator tree immediately if the generated block is a
2709   // LoopBypassBlock because SCEV expansions to generate loop bypass
2710   // checks may query it before the current function is finished.
2711   DT->addNewBlock(NewBB, BB);
2712   if (L->getParentLoop())
2713     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2714   ReplaceInstWithInst(BB->getTerminator(),
2715                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
2716   LoopBypassBlocks.push_back(BB);
2717   AddedSafetyChecks = true;
2718 }
2719 
2720 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2721   BasicBlock *BB = L->getLoopPreheader();
2722 
2723   // Generate the code that checks in runtime if arrays overlap. We put the
2724   // checks into a separate block to make the more common case of few elements
2725   // faster.
2726   Instruction *FirstCheckInst;
2727   Instruction *MemRuntimeCheck;
2728   std::tie(FirstCheckInst, MemRuntimeCheck) =
2729       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
2730   if (!MemRuntimeCheck)
2731     return;
2732 
2733   // Create a new block containing the memory check.
2734   BB->setName("vector.memcheck");
2735   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2736   // Update dominator tree immediately if the generated block is a
2737   // LoopBypassBlock because SCEV expansions to generate loop bypass
2738   // checks may query it before the current function is finished.
2739   DT->addNewBlock(NewBB, BB);
2740   if (L->getParentLoop())
2741     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2742   ReplaceInstWithInst(BB->getTerminator(),
2743                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
2744   LoopBypassBlocks.push_back(BB);
2745   AddedSafetyChecks = true;
2746 
2747   // We currently don't use LoopVersioning for the actual loop cloning but we
2748   // still use it to add the noalias metadata.
2749   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2750                                            PSE.getSE());
2751   LVer->prepareNoAliasMetadata();
2752 }
2753 
2754 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2755   /*
2756    In this function we generate a new loop. The new loop will contain
2757    the vectorized instructions while the old loop will continue to run the
2758    scalar remainder.
2759 
2760        [ ] <-- loop iteration number check.
2761     /   |
2762    /    v
2763   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2764   |  /  |
2765   | /   v
2766   ||   [ ]     <-- vector pre header.
2767   |/    |
2768   |     v
2769   |    [  ] \
2770   |    [  ]_|   <-- vector loop.
2771   |     |
2772   |     v
2773   |   -[ ]   <--- middle-block.
2774   |  /  |
2775   | /   v
2776   -|- >[ ]     <--- new preheader.
2777    |    |
2778    |    v
2779    |   [ ] \
2780    |   [ ]_|   <-- old scalar loop to handle remainder.
2781     \   |
2782      \  v
2783       >[ ]     <-- exit block.
2784    ...
2785    */
2786 
2787   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
2788   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2789   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
2790   assert(VectorPH && "Invalid loop structure");
2791   assert(ExitBlock && "Must have an exit block");
2792 
2793   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often have multiple pointer
2795   // induction variables. In the code below we also support a case where we
2796   // don't have a single induction variable.
2797   //
2798   // We try to obtain an induction variable from the original loop as hard
2799   // as possible. However if we don't find one that:
2800   //   - is an integer
2801   //   - counts from zero, stepping by one
2802   //   - is the size of the widest induction variable type
2803   // then we create a new one.
2804   OldInduction = Legal->getPrimaryInduction();
2805   Type *IdxTy = Legal->getWidestInductionType();
2806 
2807   // Split the single block loop into the two loop structure described above.
2808   BasicBlock *VecBody =
2809       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
2810   BasicBlock *MiddleBlock =
2811       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
2812   BasicBlock *ScalarPH =
2813       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
2814 
2815   // Create and register the new vector loop.
2816   Loop *Lp = LI->AllocateLoop();
2817   Loop *ParentLoop = OrigLoop->getParentLoop();
2818 
2819   // Insert the new loop into the loop nest and register the new basic blocks
2820   // before calling any utilities such as SCEV that require valid LoopInfo.
2821   if (ParentLoop) {
2822     ParentLoop->addChildLoop(Lp);
2823     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
2824     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
2825   } else {
2826     LI->addTopLevelLoop(Lp);
2827   }
2828   Lp->addBasicBlockToLoop(VecBody, *LI);
2829 
2830   // Find the loop boundaries.
2831   Value *Count = getOrCreateTripCount(Lp);
2832 
2833   Value *StartIdx = ConstantInt::get(IdxTy, 0);
2834 
2835   // Now, compare the new count to zero. If it is zero skip the vector loop and
2836   // jump to the scalar loop. This check also covers the case where the
2837   // backedge-taken count is uint##_max: adding one to it will overflow leading
2838   // to an incorrect trip count of zero. In this (rare) case we will also jump
2839   // to the scalar loop.
2840   emitMinimumIterationCountCheck(Lp, ScalarPH);
2841 
2842   // Generate the code to check any assumptions that we've made for SCEV
2843   // expressions.
2844   emitSCEVChecks(Lp, ScalarPH);
2845 
2846   // Generate the code that checks in runtime if arrays overlap. We put the
2847   // checks into a separate block to make the more common case of few elements
2848   // faster.
2849   emitMemRuntimeChecks(Lp, ScalarPH);
2850 
2851   // Generate the induction variable.
2852   // The loop step is equal to the vectorization factor (num of SIMD elements)
2853   // times the unroll factor (num of SIMD instructions).
2854   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
2855   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
2856   Induction =
2857       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
2858                               getDebugLocFromInstOrOperands(OldInduction));
2859 
2860   // We are going to resume the execution of the scalar loop.
2861   // Go over all of the induction variables that we found and fix the
2862   // PHIs that are left in the scalar version of the loop.
2863   // The starting values of PHI nodes depend on the counter of the last
2864   // iteration in the vectorized loop.
2865   // If we come from a bypass edge then we need to start from the original
2866   // start value.
2867 
2868   // This variable saves the new starting index for the scalar loop. It is used
2869   // to test if there are any tail iterations left once the vector loop has
2870   // completed.
2871   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
2872   for (auto &InductionEntry : *List) {
2873     PHINode *OrigPhi = InductionEntry.first;
2874     InductionDescriptor II = InductionEntry.second;
2875 
    // Create phi nodes to merge from the backedge-taken check block.
2877     PHINode *BCResumeVal = PHINode::Create(
2878         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
2879     Value *&EndValue = IVEndValues[OrigPhi];
2880     if (OrigPhi == OldInduction) {
2881       // We know what the end value is.
2882       EndValue = CountRoundDown;
2883     } else {
2884       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
2885       Type *StepType = II.getStep()->getType();
2886       Instruction::CastOps CastOp =
2887         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
2888       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
2889       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2890       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
2891       EndValue->setName("ind.end");
2892     }
2893 
2894     // The new PHI merges the original incoming value, in case of a bypass,
2895     // or the value at the end of the vectorized loop.
2896     BCResumeVal->addIncoming(EndValue, MiddleBlock);
2897 
2898     // Fix the scalar body counter (PHI node).
2899     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
2900 
2901     // The old induction's phi node in the scalar body needs the truncated
2902     // value.
2903     for (BasicBlock *BB : LoopBypassBlocks)
2904       BCResumeVal->addIncoming(II.getStartValue(), BB);
2905     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
2906   }
2907 
2908   // Add a check in the middle block to see if we have completed
2909   // all of the iterations in the first vector loop.
2910   // If (N - N%VF) == N, then we *don't* need to run the remainder.
2911   Value *CmpN =
2912       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
2913                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
2914   ReplaceInstWithInst(MiddleBlock->getTerminator(),
2915                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
2916 
2917   // Get ready to start creating new instructions into the vectorized body.
2918   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
2919 
2920   // Save the state.
2921   LoopVectorPreHeader = Lp->getLoopPreheader();
2922   LoopScalarPreHeader = ScalarPH;
2923   LoopMiddleBlock = MiddleBlock;
2924   LoopExitBlock = ExitBlock;
2925   LoopVectorBody = VecBody;
2926   LoopScalarBody = OldBasicBlock;
2927 
2928   // Keep all loop hints from the original loop on the vector loop (we'll
2929   // replace the vectorizer-specific hints below).
2930   if (MDNode *LID = OrigLoop->getLoopID())
2931     Lp->setLoopID(LID);
2932 
2933   LoopVectorizeHints Hints(Lp, true, *ORE);
2934   Hints.setAlreadyVectorized();
2935 
2936   return LoopVectorPreHeader;
2937 }
2938 
2939 // Fix up external users of the induction variable. At this point, we are
2940 // in LCSSA form, with all external PHIs that use the IV having one input value,
2941 // coming from the remainder loop. We need those PHIs to also have a correct
2942 // value for the IV when arriving directly from the middle block.
2943 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
2944                                        const InductionDescriptor &II,
2945                                        Value *CountRoundDown, Value *EndValue,
2946                                        BasicBlock *MiddleBlock) {
2947   // There are two kinds of external IV usages - those that use the value
2948   // computed in the last iteration (the PHI) and those that use the penultimate
2949   // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
2951 
2952   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
2953 
2954   DenseMap<Value *, Value *> MissingVals;
2955 
2956   // An external user of the last iteration's value should see the value that
2957   // the remainder loop uses to initialize its own IV.
2958   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
2959   for (User *U : PostInc->users()) {
2960     Instruction *UI = cast<Instruction>(U);
2961     if (!OrigLoop->contains(UI)) {
2962       assert(isa<PHINode>(UI) && "Expected LCSSA form");
2963       MissingVals[UI] = EndValue;
2964     }
2965   }
2966 
  // An external user of the penultimate value needs to see EndValue - Step.
2968   // The simplest way to get this is to recompute it from the constituent SCEVs,
2969   // that is Start + (Step * (CRD - 1)).
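  // For example (illustrative): for an induction variable starting at 0 with
  // step 2 and CRD = 8, the penultimate value is 0 + 2 * (8 - 1) = 14.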
2970   for (User *U : OrigPhi->users()) {
2971     auto *UI = cast<Instruction>(U);
2972     if (!OrigLoop->contains(UI)) {
2973       const DataLayout &DL =
2974           OrigLoop->getHeader()->getModule()->getDataLayout();
2975       assert(isa<PHINode>(UI) && "Expected LCSSA form");
2976 
2977       IRBuilder<> B(MiddleBlock->getTerminator());
2978       Value *CountMinusOne = B.CreateSub(
2979           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
2980       Value *CMO =
2981           !II.getStep()->getType()->isIntegerTy()
2982               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
2983                              II.getStep()->getType())
2984               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
2985       CMO->setName("cast.cmo");
2986       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
2987       Escape->setName("ind.escape");
2988       MissingVals[UI] = Escape;
2989     }
2990   }
2991 
2992   for (auto &I : MissingVals) {
2993     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
2995     // that is %IV2 = phi [...], [ %IV1, %latch ]
2996     // In this case, if IV1 has an external use, we need to avoid adding both
2997     // "last value of IV1" and "penultimate value of IV2". So, verify that we
2998     // don't already have an incoming value for the middle block.
2999     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3000       PHI->addIncoming(I.second, MiddleBlock);
3001   }
3002 }
3003 
3004 namespace {
3005 
3006 struct CSEDenseMapInfo {
3007   static bool canHandle(const Instruction *I) {
3008     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3009            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3010   }
3011 
3012   static inline Instruction *getEmptyKey() {
3013     return DenseMapInfo<Instruction *>::getEmptyKey();
3014   }
3015 
3016   static inline Instruction *getTombstoneKey() {
3017     return DenseMapInfo<Instruction *>::getTombstoneKey();
3018   }
3019 
3020   static unsigned getHashValue(const Instruction *I) {
3021     assert(canHandle(I) && "Unknown instruction!");
3022     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3023                                                            I->value_op_end()));
3024   }
3025 
3026   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3027     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3028         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3029       return LHS == RHS;
3030     return LHS->isIdenticalTo(RHS);
3031   }
3032 };
3033 
3034 } // end anonymous namespace
3035 
/// \brief Perform CSE of induction variable instructions.
3037 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3039   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3040   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3041     Instruction *In = &*I++;
3042 
3043     if (!CSEDenseMapInfo::canHandle(In))
3044       continue;
3045 
3046     // Check if we can replace this instruction with any of the
3047     // visited instructions.
3048     if (Instruction *V = CSEMap.lookup(In)) {
3049       In->replaceAllUsesWith(V);
3050       In->eraseFromParent();
3051       continue;
3052     }
3053 
3054     CSEMap[In] = In;
3055   }
3056 }
3057 
3058 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3059 /// convenience wrapper for the type-based getScalarizationOverhead API.
3060 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3061                                          const TargetTransformInfo &TTI) {
3062   if (VF == 1)
3063     return 0;
3064 
3065   unsigned Cost = 0;
3066   Type *RetTy = ToVectorTy(I->getType(), VF);
3067   if (!RetTy->isVoidTy() &&
3068       (!isa<LoadInst>(I) ||
3069        !TTI.supportsEfficientVectorElementLoadStore()))
3070     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3071 
3072   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3073     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3074     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
  } else if (!isa<StoreInst>(I) ||
             !TTI.supportsEfficientVectorElementLoadStore()) {
3078     SmallVector<const Value *, 4> Operands(I->operand_values());
3079     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3080   }
3081 
3082   return Cost;
3083 }
3084 
3085 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3086 // Return the cost of the instruction, including scalarization overhead if it's
// needed. The flag NeedToScalarize shows if the call needs to be scalarized,
// i.e., either a vector version isn't available or it is too expensive.
3089 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3090                                   const TargetTransformInfo &TTI,
3091                                   const TargetLibraryInfo *TLI,
3092                                   bool &NeedToScalarize) {
3093   Function *F = CI->getCalledFunction();
3094   StringRef FnName = CI->getCalledFunction()->getName();
3095   Type *ScalarRetTy = CI->getType();
3096   SmallVector<Type *, 4> Tys, ScalarTys;
3097   for (auto &ArgOp : CI->arg_operands())
3098     ScalarTys.push_back(ArgOp->getType());
3099 
3100   // Estimate cost of scalarized vector call. The source operands are assumed
3101   // to be vectors, so we need to extract individual elements from there,
3102   // execute VF scalar calls, and then gather the result into the vector return
3103   // value.
3104   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3105   if (VF == 1)
3106     return ScalarCallCost;
3107 
3108   // Compute corresponding vector type for return value and arguments.
3109   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3110   for (Type *ScalarTy : ScalarTys)
3111     Tys.push_back(ToVectorTy(ScalarTy, VF));
3112 
3113   // Compute costs of unpacking argument values for the scalar calls and
3114   // packing the return values to a vector.
3115   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3116 
3117   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
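
  // For example (illustrative): with VF = 4, a scalar call cost of 10, and a
  // scalarization overhead of 6, Cost = 4 * 10 + 6 = 46; a vector library
  // call costing less than 46 is preferred below.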
3118 
3119   // If we can't emit a vector call for this function, then the currently found
3120   // cost is the cost we need to return.
3121   NeedToScalarize = true;
3122   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3123     return Cost;
3124 
3125   // If the corresponding vector cost is cheaper, return its cost.
3126   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3127   if (VectorCallCost < Cost) {
3128     NeedToScalarize = false;
3129     return VectorCallCost;
3130   }
3131   return Cost;
3132 }
3133 
3134 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3135 // factor VF.  Return the cost of the instruction, including scalarization
3136 // overhead if it's needed.
3137 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3138                                        const TargetTransformInfo &TTI,
3139                                        const TargetLibraryInfo *TLI) {
3140   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3141   assert(ID && "Expected intrinsic call!");
3142 
3143   FastMathFlags FMF;
3144   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3145     FMF = FPMO->getFastMathFlags();
3146 
3147   SmallVector<Value *, 4> Operands(CI->arg_operands());
3148   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3149 }
3150 
3151 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3152   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3153   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3154   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3155 }
3156 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3157   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3158   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3159   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3160 }
3161 
3162 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3163   // For every instruction `I` in MinBWs, truncate the operands, create a
3164   // truncated version of `I` and reextend its result. InstCombine runs
3165   // later and will remove any ext/trunc pairs.
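  //
  // A shorthand sketch (illustrative, VF = 4, with only 8 significant bits):
  //
  //   %a = add <4 x i32> %x, %y
  //
  // becomes
  //
  //   %x.tr = trunc <4 x i32> %x to <4 x i8>
  //   %y.tr = trunc <4 x i32> %y to <4 x i8>
  //   %a.tr = add <4 x i8> %x.tr, %y.tr
  //   %a    = zext <4 x i8> %a.tr to <4 x i32>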
3166   SmallPtrSet<Value *, 4> Erased;
3167   for (const auto &KV : Cost->getMinimalBitwidths()) {
3168     // If the value wasn't vectorized, we must maintain the original scalar
3169     // type. The absence of the value from VectorLoopValueMap indicates that it
3170     // wasn't vectorized.
3171     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3172       continue;
3173     for (unsigned Part = 0; Part < UF; ++Part) {
3174       Value *I = getOrCreateVectorValue(KV.first, Part);
3175       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3176         continue;
3177       Type *OriginalTy = I->getType();
3178       Type *ScalarTruncatedTy =
3179           IntegerType::get(OriginalTy->getContext(), KV.second);
3180       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3181                                           OriginalTy->getVectorNumElements());
3182       if (TruncatedTy == OriginalTy)
3183         continue;
3184 
3185       IRBuilder<> B(cast<Instruction>(I));
3186       auto ShrinkOperand = [&](Value *V) -> Value * {
3187         if (auto *ZI = dyn_cast<ZExtInst>(V))
3188           if (ZI->getSrcTy() == TruncatedTy)
3189             return ZI->getOperand(0);
3190         return B.CreateZExtOrTrunc(V, TruncatedTy);
3191       };
3192 
3193       // The actual instruction modification depends on the instruction type,
3194       // unfortunately.
3195       Value *NewI = nullptr;
3196       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3197         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3198                              ShrinkOperand(BO->getOperand(1)));
3199 
3200         // Any wrapping introduced by shrinking this operation shouldn't be
3201         // considered undefined behavior. So, we can't unconditionally copy
3202         // arithmetic wrapping flags to NewI.
3203         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3204       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3205         NewI =
3206             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3207                          ShrinkOperand(CI->getOperand(1)));
3208       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3209         NewI = B.CreateSelect(SI->getCondition(),
3210                               ShrinkOperand(SI->getTrueValue()),
3211                               ShrinkOperand(SI->getFalseValue()));
3212       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3213         switch (CI->getOpcode()) {
3214         default:
3215           llvm_unreachable("Unhandled cast!");
3216         case Instruction::Trunc:
3217           NewI = ShrinkOperand(CI->getOperand(0));
3218           break;
3219         case Instruction::SExt:
3220           NewI = B.CreateSExtOrTrunc(
3221               CI->getOperand(0),
3222               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3223           break;
3224         case Instruction::ZExt:
3225           NewI = B.CreateZExtOrTrunc(
3226               CI->getOperand(0),
3227               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3228           break;
3229         }
3230       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3231         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3232         auto *O0 = B.CreateZExtOrTrunc(
3233             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3234         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3235         auto *O1 = B.CreateZExtOrTrunc(
3236             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3237 
3238         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3239       } else if (isa<LoadInst>(I)) {
3240         // Don't do anything with the operands, just extend the result.
3241         continue;
3242       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3243         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3244         auto *O0 = B.CreateZExtOrTrunc(
3245             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3246         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3247         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3248       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3249         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3250         auto *O0 = B.CreateZExtOrTrunc(
3251             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3252         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3253       } else {
3254         llvm_unreachable("Unhandled instruction type!");
3255       }
3256 
3257       // Lastly, extend the result.
3258       NewI->takeName(cast<Instruction>(I));
3259       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3260       I->replaceAllUsesWith(Res);
3261       cast<Instruction>(I)->eraseFromParent();
3262       Erased.insert(I);
3263       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3264     }
3265   }
3266 
3267   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3268   for (const auto &KV : Cost->getMinimalBitwidths()) {
3269     // If the value wasn't vectorized, we must maintain the original scalar
3270     // type. The absence of the value from VectorLoopValueMap indicates that it
3271     // wasn't vectorized.
3272     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3273       continue;
3274     for (unsigned Part = 0; Part < UF; ++Part) {
3275       Value *I = getOrCreateVectorValue(KV.first, Part);
3276       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3277       if (Inst && Inst->use_empty()) {
3278         Value *NewI = Inst->getOperand(0);
3279         Inst->eraseFromParent();
3280         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3281       }
3282     }
3283   }
3284 }
3285 
3286 void InnerLoopVectorizer::fixVectorizedLoop() {
3287   // Insert truncates and extends for any truncated instructions as hints to
3288   // InstCombine.
3289   if (VF > 1)
3290     truncateToMinimalBitwidths();
3291 
3292   // At this point every instruction in the original loop is widened to a
3293   // vector form. Now we need to fix the recurrences in the loop. These PHI
3294   // nodes are currently empty because we did not want to introduce cycles.
3295   // This is the second stage of vectorizing recurrences.
3296   fixCrossIterationPHIs();
3297 
3298   // Update the dominator tree.
3299   //
3300   // FIXME: After creating the structure of the new loop, the dominator tree is
3301   //        no longer up-to-date, and it remains that way until we update it
3302   //        here. An out-of-date dominator tree is problematic for SCEV,
3303   //        because SCEVExpander uses it to guide code generation. The
3304   //        vectorizer use SCEVExpanders in several places. Instead, we should
3305   //        keep the dominator tree up-to-date as we go.
3306   updateAnalysis();
3307 
3308   // Fix-up external users of the induction variables.
3309   for (auto &Entry : *Legal->getInductionVars())
3310     fixupIVUsers(Entry.first, Entry.second,
3311                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3312                  IVEndValues[Entry.first], LoopMiddleBlock);
3313 
3314   fixLCSSAPHIs();
3315   for (Instruction *PI : PredicatedInstructions)
3316     sinkScalarOperands(&*PI);
3317 
3318   // Remove redundant induction instructions.
3319   cse(LoopVectorBody);
3320 }
3321 
3322 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3323   // In order to support recurrences we need to be able to vectorize Phi nodes.
3324   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3325   // stage #2: We now need to fix the recurrences by adding incoming edges to
3326   // the currently empty PHI nodes. At this point every instruction in the
3327   // original loop is widened to a vector form so we can use them to construct
3328   // the incoming edges.
3329   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3330     // Handle first-order recurrences and reductions that need to be fixed.
3331     if (Legal->isFirstOrderRecurrence(&Phi))
3332       fixFirstOrderRecurrence(&Phi);
3333     else if (Legal->isReductionVariable(&Phi))
3334       fixReduction(&Phi);
3335   }
3336 }
3337 
3338 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3339   // This is the second phase of vectorizing first-order recurrences. An
3340   // overview of the transformation is described below. Suppose we have the
3341   // following loop.
3342   //
3343   //   for (int i = 0; i < n; ++i)
3344   //     b[i] = a[i] - a[i - 1];
3345   //
3346   // There is a first-order recurrence on "a". For this loop, the shorthand
3347   // scalar IR looks like:
3348   //
3349   //   scalar.ph:
3350   //     s_init = a[-1]
3351   //     br scalar.body
3352   //
3353   //   scalar.body:
3354   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3355   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3356   //     s2 = a[i]
3357   //     b[i] = s2 - s1
3358   //     br cond, scalar.body, ...
3359   //
  // In this example, s1 is a recurrence because its value depends on the
3361   // previous iteration. In the first phase of vectorization, we created a
3362   // temporary value for s1. We now complete the vectorization and produce the
3363   // shorthand vector IR shown below (for VF = 4, UF = 1).
3364   //
3365   //   vector.ph:
3366   //     v_init = vector(..., ..., ..., a[-1])
3367   //     br vector.body
3368   //
3369   //   vector.body
3370   //     i = phi [0, vector.ph], [i+4, vector.body]
3371   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3372   //     v2 = a[i, i+1, i+2, i+3];
3373   //     v3 = vector(v1(3), v2(0, 1, 2))
3374   //     b[i, i+1, i+2, i+3] = v2 - v3
3375   //     br cond, vector.body, middle.block
3376   //
3377   //   middle.block:
3378   //     x = v2(3)
3379   //     br scalar.ph
3380   //
3381   //   scalar.ph:
3382   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3383   //     br scalar.body
3384   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3387 
3388   // Get the original loop preheader and single loop latch.
3389   auto *Preheader = OrigLoop->getLoopPreheader();
3390   auto *Latch = OrigLoop->getLoopLatch();
3391 
3392   // Get the initial and previous values of the scalar recurrence.
3393   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3394   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3395 
3396   // Create a vector from the initial value.
3397   auto *VectorInit = ScalarInit;
3398   if (VF > 1) {
3399     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3400     VectorInit = Builder.CreateInsertElement(
3401         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3402         Builder.getInt32(VF - 1), "vector.recur.init");
3403   }
3404 
3405   // We constructed a temporary phi node in the first phase of vectorization.
3406   // This phi node will eventually be deleted.
3407   Builder.SetInsertPoint(
3408       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3409 
3410   // Create a phi node for the new recurrence. The current value will either be
3411   // the initial value inserted into a vector or loop-varying vector value.
3412   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3413   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3414 
3415   // Get the vectorized previous value of the last part UF - 1. It appears last
3416   // among all unrolled iterations, due to the order of their construction.
3417   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3418 
3419   // Set the insertion point after the previous value if it is an instruction.
3420   // Note that the previous value may have been constant-folded so it is not
3421   // guaranteed to be an instruction in the vector loop. Also, if the previous
3422   // value is a phi node, we should insert after all the phi nodes to avoid
3423   // breaking basic block verification.
3424   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
3425       isa<PHINode>(PreviousLastPart))
3426     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3427   else
3428     Builder.SetInsertPoint(
3429         &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
3430 
3431   // We will construct a vector for the recurrence by combining the values for
3432   // the current and previous iterations. This is the required shuffle mask.
3433   SmallVector<Constant *, 8> ShuffleMask(VF);
3434   ShuffleMask[0] = Builder.getInt32(VF - 1);
3435   for (unsigned I = 1; I < VF; ++I)
3436     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
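
  // For example (illustrative, VF = 4), the mask is <3, 4, 5, 6>: the last
  // element of the first input vector followed by the first three elements
  // of the second.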
3437 
3438   // The vector from which to take the initial value for the current iteration
3439   // (actual or unrolled). Initially, this is the vector phi node.
3440   Value *Incoming = VecPhi;
3441 
3442   // Shuffle the current and previous vector and update the vector parts.
3443   for (unsigned Part = 0; Part < UF; ++Part) {
3444     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3445     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3446     auto *Shuffle =
3447         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3448                                              ConstantVector::get(ShuffleMask))
3449                : Incoming;
3450     PhiPart->replaceAllUsesWith(Shuffle);
3451     cast<Instruction>(PhiPart)->eraseFromParent();
3452     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3453     Incoming = PreviousPart;
3454   }
3455 
3456   // Fix the latch value of the new recurrence in the vector loop.
3457   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3458 
3459   // Extract the last vector element in the middle block. This will be the
3460   // initial value for the recurrence when jumping to the scalar loop.
3461   auto *ExtractForScalar = Incoming;
3462   if (VF > 1) {
3463     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3464     ExtractForScalar = Builder.CreateExtractElement(
3465         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3466   }
3467   // Extract the second last element in the middle block if the
3468   // Phi is used outside the loop. We need to extract the phi itself
3469   // and not the last element (the phi update in the current iteration). This
3470   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3471   // when the scalar loop is not run at all.
3472   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3473   if (VF > 1)
3474     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3475         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`. This is analogous to the vectorized case above: extracting the
3479   // second last element when VF > 1.
3480   else if (UF > 1)
3481     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3482 
3483   // Fix the initial value of the original recurrence in the scalar loop.
3484   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3485   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3486   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3487     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3488     Start->addIncoming(Incoming, BB);
3489   }
3490 
3491   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3492   Phi->setName("scalar.recur");
3493 
3494   // Finally, fix users of the recurrence outside the loop. The users will need
3495   // either the last value of the scalar recurrence or the last value of the
3496   // vector recurrence we extracted in the middle block. Since the loop is in
3497   // LCSSA form, we just need to find the phi node for the original scalar
3498   // recurrence in the exit block, and then add an edge for the middle block.
3499   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3500     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3501       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3502       break;
3503     }
3504   }
3505 }
3506 
3507 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3508   Constant *Zero = Builder.getInt32(0);
3509 
  // Get its reduction variable descriptor.
3511   assert(Legal->isReductionVariable(Phi) &&
3512          "Unable to find the reduction variable");
3513   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3514 
3515   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3516   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3517   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3518   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3519     RdxDesc.getMinMaxRecurrenceKind();
3520   setDebugLocFromInst(Builder, ReductionStartValue);
3521 
3522   // We need to generate a reduction vector from the incoming scalar.
3523   // To do so, we need to generate the 'identity' vector and override
3524   // one of the elements with the incoming scalar reduction. We need
3525   // to do it in the vector-loop preheader.
3526   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3527 
3528   // This is the vector-clone of the value that leaves the loop.
3529   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3530 
  // Find the reduction identity variable. Zero for addition, or and xor;
  // one for multiplication; -1 for and.
3533   Value *Identity;
3534   Value *VectorStart;
3535   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3536       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
3538     if (VF == 1) {
3539       VectorStart = Identity = ReductionStartValue;
3540     } else {
3541       VectorStart = Identity =
3542         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3543     }
3544   } else {
3545     // Handle other reduction kinds:
3546     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3547         RK, VecTy->getScalarType());
3548     if (VF == 1) {
3549       Identity = Iden;
3550       // This vector is the Identity vector where the first element is the
3551       // incoming scalar reduction.
3552       VectorStart = ReductionStartValue;
3553     } else {
3554       Identity = ConstantVector::getSplat(VF, Iden);
3555 
3556       // This vector is the Identity vector where the first element is the
3557       // incoming scalar reduction.
3558       VectorStart =
3559         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3560     }
3561   }
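
  // For example (illustrative): for an integer add reduction with start value
  // %s and VF = 4, Identity is <0, 0, 0, 0> and VectorStart is <%s, 0, 0, 0>,
  // so the start value participates exactly once.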
3562 
3563   // Fix the vector-loop phi.
3564 
3565   // Reductions do not have to start at zero. They can start with
  // any loop-invariant value.
3567   BasicBlock *Latch = OrigLoop->getLoopLatch();
3568   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3569   for (unsigned Part = 0; Part < UF; ++Part) {
3570     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3571     Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the first unroll
    // part.
3574     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3575     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3576     cast<PHINode>(VecRdxPhi)
3577       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3578   }
3579 
3580   // Before each round, move the insertion point right between
3581   // the PHIs and the values we are going to write.
3582   // This allows us to write both PHINodes and the extractelement
3583   // instructions.
3584   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3585 
3586   setDebugLocFromInst(Builder, LoopExitInst);
3587 
3588   // If the vector reduction can be performed in a smaller type, we truncate
3589   // then extend the loop exit value to enable InstCombine to evaluate the
3590   // entire expression in the smaller type.
3591   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3592     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3593     Builder.SetInsertPoint(
3594         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3595     VectorParts RdxParts(UF);
3596     for (unsigned Part = 0; Part < UF; ++Part) {
3597       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3598       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3599       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3600                                         : Builder.CreateZExt(Trunc, VecTy);
3601       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3602            UI != RdxParts[Part]->user_end();)
3603         if (*UI != Trunc) {
3604           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3605           RdxParts[Part] = Extnd;
3606         } else {
3607           ++UI;
3608         }
3609     }
3610     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3611     for (unsigned Part = 0; Part < UF; ++Part) {
3612       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3613       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3614     }
3615   }
3616 
3617   // Reduce all of the unrolled parts into a single vector.
3618   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3619   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3620   setDebugLocFromInst(Builder, ReducedPartRdx);
3621   for (unsigned Part = 1; Part < UF; ++Part) {
3622     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3623     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3624       // Floating point operations had to be 'fast' to enable the reduction.
3625       ReducedPartRdx = addFastMathFlag(
3626           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3627                               ReducedPartRdx, "bin.rdx"));
3628     else
3629       ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
3630           Builder, MinMaxKind, ReducedPartRdx, RdxPart);
3631   }
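
  // For example (illustrative, UF = 2): for an add reduction, the loop above
  // computes bin.rdx = add part0, part1, which is then reduced to a single
  // scalar below.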
3632 
3633   if (VF > 1) {
3634     bool NoNaN = Legal->hasFunNoNaNAttr();
3635     ReducedPartRdx =
3636         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3637     // If the reduction can be performed in a smaller type, we need to extend
3638     // the reduction to the wider type before we branch to the original loop.
3639     if (Phi->getType() != RdxDesc.getRecurrenceType())
3640       ReducedPartRdx =
3641         RdxDesc.isSigned()
3642         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3643         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3644   }
3645 
3646   // Create a phi node that merges control-flow from the backedge-taken check
3647   // block and the middle block.
3648   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3649                                         LoopScalarPreHeader->getTerminator());
3650   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3651     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3652   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3653 
3654   // Now, we need to fix the users of the reduction variable
3655   // inside and outside of the scalar remainder loop.
3656   // We know that the loop is in LCSSA form. We need to update the
3657   // PHI nodes in the exit blocks.
3658   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3659     // All PHINodes need to have a single entry edge, or two if
3660     // we already fixed them.
3661     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3662 
3663     // We found a reduction value exit-PHI. Update it with the
3664     // incoming bypass edge.
3665     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3666       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3667   } // end of the LCSSA phi scan.
3668 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
3671   int IncomingEdgeBlockIdx =
3672     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3673   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3674   // Pick the other block.
3675   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3676   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3677   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3678 }

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getNumIncomingValues() == 1) {
      assert(OrigLoop->isLoopInvariant(LCSSAPhi.getIncomingValue(0)) &&
             "Incoming value isn't loop invariant");
      LCSSAPhi.addIncoming(LCSSAPhi.getIncomingValue(0), LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass over the worklist sinks no instructions.
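  // For example, a scalarized conditional store lives in its own predicated
  // block, while the scalar GEP computing its address is initially emitted
  // outside that block; if the store is the GEP's only user, the GEP (and
  // then, transitively, the GEP's operands) can be sunk into the block.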
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  PHINode *P = cast<PHINode>(PN);
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
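    // For example, with VF = 4, UF = 2, and a non-uniform pointer induction,
    // eight scalar "next.gep" values are emitted per vector iteration, one
    // for each of the offsets PtrInd + 0 through PtrInd + 7 (schematically).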
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Br:
  case Instruction::PHI:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::GetElementPtr: {
    // Construct a vector GEP by widening the operands of the scalar GEP as
    // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
    // results in a vector of pointers when at least one operand of the GEP
    // is vector-typed. Thus, to keep the representation compact, we only use
    // vector-typed operands for loop-varying values.
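    // For example, a scalar GEP with a loop-invariant base and a loop-varying
    // index, e.g.
    //   %gep = getelementptr inbounds i32, i32* %A, i64 %iv
    // is widened (at VF = 4) by using a vector operand only for the index:
    //   %gep = getelementptr inbounds i32, i32* %A, <4 x i64> %vec.iv
    // which yields a <4 x i32*> vector of pointers.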
    auto *GEP = cast<GetElementPtrInst>(&I);

    if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
      // If we are vectorizing, but the GEP has only loop-invariant operands,
      // the GEP we build (by only using vector-typed operands for
      // loop-varying values) would be a scalar pointer. Thus, to ensure we
      // produce a vector of pointers, we need to either arbitrarily pick an
      // operand to broadcast, or broadcast a clone of the original GEP.
      // Here, we broadcast a clone of the original.
      //
      // TODO: If at some point we decide to scalarize instructions having
      //       loop-invariant operands, this special case will no longer be
      //       required. We would add the scalarization decision to
      //       collectLoopScalars() and teach getVectorValue() to broadcast
      //       the lane-zero scalar value.
      auto *Clone = Builder.Insert(GEP->clone());
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
        VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
        addMetadata(EntryPart, GEP);
      }
    } else {
      // If the GEP has at least one loop-varying operand, we are sure to
      // produce a vector of pointers. But if we are only unrolling, we want
      // to produce a scalar GEP for each unroll part. Thus, the GEP we
      // produce with the code below will be scalar (if VF == 1) or vector
      // (otherwise). Note that for the unroll-only case, we still maintain
      // values in the vector mapping with initVector, as we do for other
      // instructions.
      for (unsigned Part = 0; Part < UF; ++Part) {
        // The pointer operand of the new GEP. If it's loop-invariant, we
        // won't broadcast it.
        auto *Ptr =
            OrigLoop->isLoopInvariant(GEP->getPointerOperand())
                ? GEP->getPointerOperand()
                : getOrCreateVectorValue(GEP->getPointerOperand(), Part);

        // Collect all the indices for the new GEP. If any index is
        // loop-invariant, we won't broadcast it.
        SmallVector<Value *, 4> Indices;
        for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
          if (OrigLoop->isLoopInvariant(U.get()))
            Indices.push_back(U.get());
          else
            Indices.push_back(getOrCreateVectorValue(U.get(), Part));
        }

        // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
        // but it should be a vector otherwise.
        auto *NewGEP = GEP->isInBounds()
                           ? Builder.CreateInBoundsGEP(Ptr, Indices)
                           : Builder.CreateGEP(Ptr, Indices);
        assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
               "NewGEP is not a pointer vector");
        VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
        addMetadata(NewGEP, GEP);
      }
    }

    break;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen binops.
    auto *BinOp = cast<BinaryOperator>(&I);
    setDebugLocFromInst(Builder, BinOp);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
      Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);

      if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
        VecOp->copyIRFlags(BinOp);

      // Use this vector value for all users of the original instruction.
      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, BinOp);
    }

    break;
  }
  case Instruction::Select: {
    // Widen selects.
    // If the selector is loop invariant we can create a select
    // instruction with a scalar condition. Otherwise, use vector-select.
    auto *SE = PSE.getSE();
    bool InvariantCond =
        SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
    setDebugLocFromInst(Builder, &I);

    // The condition can be loop invariant but still defined inside the
    // loop. This means that we can't just use the original 'cond' value.
    // We have to take the 'vectorized' value and pick the first lane.
    // Instcombine will make this a no-op.

    auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
      Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
      Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
      Value *Sel =
          Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
      VectorLoopValueMap.setVectorValue(&I, Part, Sel);
      addMetadata(Sel, &I);
    }

    break;
  }

  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      VectorLoopValueMap.setVectorValue(&I, Part, C);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      VectorLoopValueMap.setVectorValue(&I, Part, Cast);
      addMetadata(Cast, &I);
    }
    break;
  }

  case Instruction::Call: {
    // Ignore dbg intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      break;
    setDebugLocFromInst(Builder, &I);

    Module *M = I.getParent()->getParent()->getParent();
    auto *CI = cast<CallInst>(&I);

    StringRef FnName = CI->getCalledFunction()->getName();
    Function *F = CI->getCalledFunction();
    Type *RetTy = ToVectorTy(CI->getType(), VF);
    SmallVector<Type *, 4> Tys;
    for (Value *ArgOperand : CI->arg_operands())
      Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // This flag shows whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e., whether the intrinsic
    // call is at least as cheap as the vector library call.
    bool NeedToScalarize;
    unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
    bool UseVectorIntrinsic =
        ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
    assert((UseVectorIntrinsic || !NeedToScalarize) &&
           "Instruction should be scalarized elsewhere.");

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 4> Args;
      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
        Value *Arg = CI->getArgOperand(i);
        // Some intrinsics have a scalar argument - don't replace it with a
        // vector.
        if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
          Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
        Args.push_back(Arg);
      }

      Function *VectorF;
      if (UseVectorIntrinsic) {
        // Use vector version of the intrinsic.
        Type *TysForDecl[] = {CI->getType()};
        if (VF > 1)
          TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
        VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      } else {
        // Use vector version of the library call.
        StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
        assert(!VFnName.empty() && "Vector function name is empty.");
        VectorF = M->getFunction(VFnName);
        if (!VectorF) {
          // Generate a declaration.
          FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
          VectorF =
              Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
          VectorF->copyAttributesFrom(F);
        }
      }
      assert(VectorF && "Can't create vector function.");

      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

      if (isa<FPMathOperator>(V))
        V->copyFastMathFlags(CI);

      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }

  default:
    // This instruction is not vectorized by simple widening.
    DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  DT->addNewBlock(LoopMiddleBlock,
                  LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
}

void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF >= 2 && !Scalars.count(VF) &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
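  // For example, the pointer of a consecutive (unit-stride) load stays a
  // scalar GEP after vectorization, whereas a gathered load consumes a
  // vector of pointers, so that use of the pointer is not a scalar use.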
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar use,
  // and (3) pointer induction variables and their update instructions (we
  // currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // (3) Add to the worklist all pointer induction variables and their update
  // instructions.
  //
  // TODO: Once we are able to vectorize pointer induction variables we should
  //       no longer insert them into the worklist here.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
      continue;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
  }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  if (ForcedScalars.count(VF))
    for (auto *I : ForcedScalars.find(VF)->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
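  // For example, if a getelementptr already identified as scalar takes its
  // pointer operand from a loop-varying bitcast, and every user of that
  // bitcast is either in the worklist or a scalar memory use, the bitcast
  // is added to the worklist as well.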
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // We already considered pointer induction variables, so there's no reason
    // to look at their users again.
    //
    // TODO: Once we are able to vectorize pointer induction variables we
    //       should no longer skip over them here.
    if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) {
  if (!Legal->blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getMemInstValueType(I);
    return isa<LoadInst>(I)
               ? !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty))
               : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
                                                               unsigned VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && !Uniforms.count(VF) &&
         "This function should not be visited twice for the same VF");

  // Initialize the entry even if we find no uniform value, so that we don't
  // analyze this VF again: Uniforms.count(VF) will then return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of current loop are out of
  // scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
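  // For example, a latch comparison such as
  //   %cond = icmp eq i64 %iv.next, %n
  // remains uniform when it is used only by the latch branch, since every
  // vector lane would compute the same value.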
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  auto isUniformDecision = [&](Instruction *I, unsigned VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };
  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses =
          llvm::all_of(Ptr->users(), [&](User *U) -> bool {
            return getLoadStorePointerOperand(U) == Ptr;
          });

      // Ensure the memory instruction will not be scalarized or used by
      // gather/scatter, making its pointer operand non-uniform. If the pointer
      // operand is used by any instruction other than a memory access, we
      // conservatively assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
        PossibleNonUniformPtrs.insert(Ptr);
      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like or part of an interleaved group, the pointer
      // operand will remain uniform.
      else
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (!PossibleNonUniformPtrs.count(V)) {
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
      Worklist.insert(V);
    }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return !TheLoop->contains(J) || Worklist.count(J) ||
                   (OI == getLoadStorePointerOperand(J) &&
                    isUniformDecision(J, VF));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
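  // For example, in an if-then diamond, a load in the "then" block may
  // execute before a store in the common successor; reverse postorder visits
  // the "then" block first, so the load precedes the store in
  // AccessStrideInfo.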
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getMemInstAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving() {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() and mayWriteToMemory() are not mutually
      // exclusive in the case of atomic loads. We shouldn't see those here;
      // canVectorizeMemory() should have returned false, except when we were
      // asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
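      // For example, in a stride-2 group of 4-byte accesses where B is A[i]
      // at index 0, an access A[i + 1] has DistanceToB = 4 and is therefore
      // assigned index 1.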
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
                      "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true, we'll
  // only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, then all
    // the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to insert the check anyway, since it's still
    // likely to be dynamically uniform if the target can skip it.
    DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent "
                    "target\n");

    ORE->emit(
      createMissedAnalysis("CantVersionLoopWithDivergentTarget")
      << "runtime pointer checks needed. Not enabled for divergent target");

    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
    return computeFeasibleMaxVF(OptForSize, TC);

  if (Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return None;
  }

  // If we optimize the program for size, avoid creating the tail loop.
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  // If we don't know the precise trip count, or if it is too small to be
  // worth vectorizing, don't try to vectorize.
  if (TC < 2) {
    ORE->emit(
        createMissedAnalysis("UnknownLoopCountComplexCFG")
        << "unable to calculate the loop count due to complex control flow");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }
4874 
4875   unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);
4876 
4877   if (TC % MaxVF != 0) {
4878     // If the trip count that we found modulo the vectorization factor is not
4879     // zero then we require a tail.
4880     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
4881     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
4882     //        smaller MaxVF that does not require a scalar epilog.
4883 
4884     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
4885               << "cannot optimize for size and vectorize at the "
4886                  "same time. Enable vectorization of this loop "
4887                  "with '#pragma clang loop vectorize(enable)' "
4888                  "when compiling with -Os/-Oz");
4889     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
4890     return None;
4891   }
4892 
4893   return MaxVF;
4894 }
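
// Worked example for the OptForSize path above (hypothetical numbers): with a
// constant trip count TC = 16 and computeFeasibleMaxVF returning 8, we have
// TC % MaxVF == 0, so a tail-less vector loop with MaxVF = 8 is acceptable.
// With TC = 17 the remainder is 1, a tail loop would be required, and
// computeMaxVF returns None under -Os/-Oz.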
4895 
4896 unsigned
4897 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
4898                                                  unsigned ConstTripCount) {
4899   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4900   unsigned SmallestType, WidestType;
4901   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4902   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
4903 
  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
4908   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
4909 
4910   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
4911 
4912   unsigned MaxVectorSize = WidestRegister / WidestType;
4913 
4914   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
4915                << WidestType << " bits.\n");
4916   DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
4917                << " bits.\n");
4918 
4919   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
4920                                 " into one vector!");
4921   if (MaxVectorSize == 0) {
4922     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
4923     MaxVectorSize = 1;
4924     return MaxVectorSize;
4925   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
4926              isPowerOf2_32(ConstTripCount)) {
4927     // We need to clamp the VF to be the ConstTripCount. There is no point in
4928     // choosing a higher viable VF as done in the loop below.
4929     DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
4930                  << ConstTripCount << "\n");
4931     MaxVectorSize = ConstTripCount;
4932     return MaxVectorSize;
4933   }
4934 
4935   unsigned MaxVF = MaxVectorSize;
4936   if (TTI.shouldMaximizeVectorBandwidth(OptForSize) ||
4937       (MaximizeBandwidth && !OptForSize)) {
4938     // Collect all viable vectorization factors larger than the default MaxVF
4939     // (i.e. MaxVectorSize).
4940     SmallVector<unsigned, 8> VFs;
4941     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
4942     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
4943       VFs.push_back(VS);
4944 
4945     // For each VF calculate its register usage.
4946     auto RUs = calculateRegisterUsage(VFs);
4947 
4948     // Select the largest VF which doesn't require more registers than existing
4949     // ones.
4950     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
4951     for (int i = RUs.size() - 1; i >= 0; --i) {
4952       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
4953         MaxVF = VFs[i];
4954         break;
4955       }
4956     }
4957     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
4958       if (MaxVF < MinVF) {
4959         DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
4960                      << ") with target's minimum: " << MinVF << '\n');
4961         MaxVF = MinVF;
4962       }
4963     }
4964   }
4965   return MaxVF;
4966 }
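
// Worked example (hypothetical target): with 256-bit vector registers and
// WidestType = 32 bits, MaxVectorSize = 256 / 32 = 8. A constant trip count
// of 4 (a power of two smaller than 8) clamps the result to 4. If the target
// asks to maximize vector bandwidth and SmallestType = 8 bits, the loop above
// additionally considers VFs of 16 and 32 (up to 256 / 8), keeping the
// largest one whose register usage still fits the target.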
4967 
4968 VectorizationFactor
4969 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
4970   float Cost = expectedCost(1).first;
4971   const float ScalarCost = Cost;
4972   unsigned Width = 1;
4973   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
4974 
4975   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
4976   // Ignore scalar width, because the user explicitly wants vectorization.
4977   if (ForceVectorization && MaxVF > 1) {
4978     Width = 2;
4979     Cost = expectedCost(Width).first / (float)Width;
4980   }
4981 
4982   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
4986     VectorizationCostTy C = expectedCost(i);
4987     float VectorCost = C.first / (float)i;
4988     DEBUG(dbgs() << "LV: Vector loop of width " << i
4989                  << " costs: " << (int)VectorCost << ".\n");
4990     if (!C.second && !ForceVectorization) {
4991       DEBUG(
4992           dbgs() << "LV: Not considering vector loop of width " << i
4993                  << " because it will not generate any vector instructions.\n");
4994       continue;
4995     }
4996     if (VectorCost < Cost) {
4997       Cost = VectorCost;
4998       Width = i;
4999     }
5000   }
5001 
5002   if (!EnableCondStoresVectorization && NumPredStores) {
5003     ORE->emit(createMissedAnalysis("ConditionalStore")
5004               << "store that is conditionally executed prevents vectorization");
5005     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
5006     Width = 1;
5007     Cost = ScalarCost;
5008   }
5009 
5010   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5011         << "LV: Vectorization seems to be not beneficial, "
5012         << "but was forced by a user.\n");
5013   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5014   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5015   return Factor;
5016 }
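
// Worked example (hypothetical costs): with a scalar loop cost of 8, a
// VF = 2 cost of 14 (7 per lane) and a VF = 4 cost of 20 (5 per lane), the
// loop above selects Width = 4 because 5 < 7 < 8. The returned Factor is then
// {4, 20}, i.e. the width and the width times the per-lane cost.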
5017 
5018 std::pair<unsigned, unsigned>
5019 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5020   unsigned MinWidth = -1U;
5021   unsigned MaxWidth = 8;
5022   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5023 
5024   // For each block.
5025   for (BasicBlock *BB : TheLoop->blocks()) {
5026     // For each instruction in the loop.
5027     for (Instruction &I : *BB) {
5028       Type *T = I.getType();
5029 
5030       // Skip ignored values.
5031       if (ValuesToIgnore.count(&I))
5032         continue;
5033 
5034       // Only examine Loads, Stores and PHINodes.
5035       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5036         continue;
5037 
5038       // Examine PHI nodes that are reduction variables. Update the type to
5039       // account for the recurrence type.
5040       if (auto *PN = dyn_cast<PHINode>(&I)) {
5041         if (!Legal->isReductionVariable(PN))
5042           continue;
5043         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
5044         T = RdxDesc.getRecurrenceType();
5045       }
5046 
5047       // Examine the stored values.
5048       if (auto *ST = dyn_cast<StoreInst>(&I))
5049         T = ST->getValueOperand()->getType();
5050 
5051       // Ignore loaded pointer types and stored pointer types that are not
5052       // vectorizable.
5053       //
5054       // FIXME: The check here attempts to predict whether a load or store will
5055       //        be vectorized. We only know this for certain after a VF has
5056       //        been selected. Here, we assume that if an access can be
5057       //        vectorized, it will be. We should also look at extending this
5058       //        optimization to non-pointer types.
5059       //
5060       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5061           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5062         continue;
5063 
5064       MinWidth = std::min(MinWidth,
5065                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5066       MaxWidth = std::max(MaxWidth,
5067                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5068     }
5069   }
5070 
5071   return {MinWidth, MaxWidth};
5072 }
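
// Worked example: a loop that loads i8 elements and stores i32 sums yields
// {8, 32}. Note that MaxWidth starts at 8, so a loop touching only i1 values
// still reports a widest type of 8 bits.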
5073 
5074 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
5075                                                            unsigned VF,
5076                                                            unsigned LoopCost) {
5077   // -- The interleave heuristics --
5078   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5079   // There are many micro-architectural considerations that we can't predict
5080   // at this level. For example, frontend pressure (on decode or fetch) due to
5081   // code size, or the number and capabilities of the execution ports.
5082   //
5083   // We use the following heuristics to select the interleave count:
5084   // 1. If the code has reductions, then we interleave to break the cross
5085   // iteration dependency.
5086   // 2. If the loop is really small, then we interleave to reduce the loop
5087   // overhead.
5088   // 3. We don't interleave if we think that we will spill registers to memory
5089   // due to the increased register pressure.
5090 
5091   // When we optimize for size, we don't interleave.
5092   if (OptForSize)
5093     return 1;
5094 
  // Do not interleave if there is a maximum safe dependence distance; the
  // distance was already used to limit the vectorization factor.
5096   if (Legal->getMaxSafeDepDistBytes() != -1U)
5097     return 1;
5098 
5099   // Do not interleave loops with a relatively small trip count.
5100   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5101   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
5102     return 1;
5103 
5104   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
5105   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5106                << " registers\n");
5107 
5108   if (VF == 1) {
5109     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5110       TargetNumRegisters = ForceTargetNumScalarRegs;
5111   } else {
5112     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5113       TargetNumRegisters = ForceTargetNumVectorRegs;
5114   }
5115 
5116   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by MaxLocalUsers below, so clamp it to at least one: assume
  // every loop has at least one instruction that uses at least one register.
5119   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
5120 
5121   // We calculate the interleave count using the following formula.
5122   // Subtract the number of loop invariants from the number of available
5123   // registers. These registers are used by all of the interleaved instances.
5124   // Next, divide the remaining registers by the number of registers that is
5125   // required by the loop, in order to estimate how many parallel instances
5126   // fit without causing spills. All of this is rounded down if necessary to be
5127   // a power of two. We want power of two interleave count to simplify any
5128   // addressing operations or alignment considerations.
5129   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
5130                               R.MaxLocalUsers);
5131 
5132   // Don't count the induction variable as interleaved.
5133   if (EnableIndVarRegisterHeur)
5134     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
5135                        std::max(1U, (R.MaxLocalUsers - 1)));
5136 
5137   // Clamp the interleave ranges to reasonable counts.
5138   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5139 
5140   // Check if the user has overridden the max.
5141   if (VF == 1) {
5142     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5143       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5144   } else {
5145     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5146       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5147   }
5148 
5149   // If we did not calculate the cost for VF (because the user selected the VF)
5150   // then we calculate the cost of VF here.
5151   if (LoopCost == 0)
5152     LoopCost = expectedCost(VF).first;
5153 
  // Clamp the calculated IC to be between 1 and the maximum interleave count
  // that the target allows.
5156   if (IC > MaxInterleaveCount)
5157     IC = MaxInterleaveCount;
5158   else if (IC < 1)
5159     IC = 1;
5160 
5161   // Interleave if we vectorized this loop and there is a reduction that could
5162   // benefit from interleaving.
5163   if (VF > 1 && !Legal->getReductionVars()->empty()) {
5164     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5165     return IC;
5166   }
5167 
5168   // Note that if we've already vectorized the loop we will have done the
5169   // runtime check and so interleaving won't require further checks.
5170   bool InterleavingRequiresRuntimePointerCheck =
5171       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5172 
5173   // We want to interleave small loops in order to reduce the loop overhead and
5174   // potentially expose ILP opportunities.
5175   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5176   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5177     // We assume that the cost overhead is 1 and we use the cost model
5178     // to estimate the cost of the loop and interleave until the cost of the
5179     // loop overhead is about 5% of the cost of the loop.
5180     unsigned SmallIC =
5181         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5182 
5183     // Interleave until store/load ports (estimated by max interleave count) are
5184     // saturated.
5185     unsigned NumStores = Legal->getNumStores();
5186     unsigned NumLoads = Legal->getNumLoads();
5187     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5188     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5189 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so
    // the critical path only gets increased by one reduction operation.
5194     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
5195       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5196       SmallIC = std::min(SmallIC, F);
5197       StoresIC = std::min(StoresIC, F);
5198       LoadsIC = std::min(LoadsIC, F);
5199     }
5200 
5201     if (EnableLoadStoreRuntimeInterleave &&
5202         std::max(StoresIC, LoadsIC) > SmallIC) {
5203       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5204       return std::max(StoresIC, LoadsIC);
5205     }
5206 
5207     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5208     return SmallIC;
5209   }
5210 
5211   // Interleave if this is a large loop (small loops are already dealt with by
5212   // this point) that could benefit from interleaving.
5213   bool HasReductions = !Legal->getReductionVars()->empty();
5214   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5215     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5216     return IC;
5217   }
5218 
5219   DEBUG(dbgs() << "LV: Not Interleaving.\n");
5220   return 1;
5221 }
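
// Worked example of the register-pressure formula above (hypothetical
// numbers): with TargetNumRegisters = 16, R.LoopInvariantRegs = 2 and
// R.MaxLocalUsers = 3, IC = PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4)
// = 4, which is subsequently clamped to at least 1 and at most the target's
// maximum interleave factor.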
5222 
5223 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5224 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5225   // This function calculates the register usage by measuring the highest number
5226   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in a topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
5229   // met before their users. We assume that each instruction that has in-loop
5230   // users starts an interval. We record every time that an in-loop value is
5231   // used, so we have a list of the first and last occurrences of each
5232   // instruction. Next, we transpose this data structure into a multi map that
5233   // holds the list of intervals that *end* at a specific location. This multi
5234   // map allows us to perform a linear search. We scan the instructions linearly
5235   // and record each time that a new interval starts, by placing it in a set.
5236   // If we find this value in the multi-map then we remove it from the set.
5237   // The max register usage is the maximum size of the set.
5238   // We also search for instructions that are defined outside the loop, but are
5239   // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
5242   LoopBlocksDFS DFS(TheLoop);
5243   DFS.perform(LI);
5244 
5245   RegisterUsage RU;
5246 
5247   // Each 'key' in the map opens a new interval. The values
5248   // of the map are the index of the 'last seen' usage of the
5249   // instruction that is the key.
5250   using IntervalMap = DenseMap<Instruction *, unsigned>;
5251 
  // Maps each index to the instruction occupying that position.
5253   DenseMap<unsigned, Instruction *> IdxToInstr;
5254   // Marks the end of each interval.
5255   IntervalMap EndPoint;
  // Saves the set of instructions that have uses within the loop.
5257   SmallSet<Instruction *, 8> Ends;
5258   // Saves the list of values that are used in the loop but are
5259   // defined outside the loop, such as arguments and constants.
5260   SmallPtrSet<Value *, 8> LoopInvariants;
5261 
5262   unsigned Index = 0;
5263   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5264     for (Instruction &I : *BB) {
5265       IdxToInstr[Index++] = &I;
5266 
5267       // Save the end location of each USE.
5268       for (Value *U : I.operands()) {
5269         auto *Instr = dyn_cast<Instruction>(U);
5270 
5271         // Ignore non-instruction values such as arguments, constants, etc.
5272         if (!Instr)
5273           continue;
5274 
5275         // If this instruction is outside the loop then record it and continue.
5276         if (!TheLoop->contains(Instr)) {
5277           LoopInvariants.insert(Instr);
5278           continue;
5279         }
5280 
5281         // Overwrite previous end points.
5282         EndPoint[Instr] = Index;
5283         Ends.insert(Instr);
5284       }
5285     }
5286   }
5287 
5288   // Saves the list of intervals that end with the index in 'key'.
5289   using InstrList = SmallVector<Instruction *, 2>;
5290   DenseMap<unsigned, InstrList> TransposeEnds;
5291 
5292   // Transpose the EndPoints to a list of values that end at each index.
5293   for (auto &Interval : EndPoint)
5294     TransposeEnds[Interval.second].push_back(Interval.first);
5295 
5296   SmallSet<Instruction *, 8> OpenIntervals;
5297 
5298   // Get the size of the widest register.
5299   unsigned MaxSafeDepDist = -1U;
5300   if (Legal->getMaxSafeDepDistBytes() != -1U)
5301     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5302   unsigned WidestRegister =
5303       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5304   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5305 
5306   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5307   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
5308 
5309   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5310 
5311   // A lambda that gets the register usage for the given type and VF.
5312   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5313     if (Ty->isTokenTy())
5314       return 0U;
5315     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5316     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5317   };
5318 
5319   for (unsigned int i = 0; i < Index; ++i) {
5320     Instruction *I = IdxToInstr[i];
5321 
5322     // Remove all of the instructions that end at this location.
5323     InstrList &List = TransposeEnds[i];
5324     for (Instruction *ToRemove : List)
5325       OpenIntervals.erase(ToRemove);
5326 
5327     // Ignore instructions that are never used within the loop.
5328     if (!Ends.count(I))
5329       continue;
5330 
5331     // Skip ignored values.
5332     if (ValuesToIgnore.count(I))
5333       continue;
5334 
5335     // For each VF find the maximum usage of registers.
5336     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5337       if (VFs[j] == 1) {
5338         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
5339         continue;
5340       }
5341       collectUniformsAndScalars(VFs[j]);
5342       // Count the number of live intervals.
5343       unsigned RegUsage = 0;
5344       for (auto Inst : OpenIntervals) {
5345         // Skip ignored values for VF > 1.
5346         if (VecValuesToIgnore.count(Inst) ||
5347             isScalarAfterVectorization(Inst, VFs[j]))
5348           continue;
5349         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
5350       }
5351       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
5352     }
5353 
5354     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5355                  << OpenIntervals.size() << '\n');
5356 
5357     // Add the current instruction to the list of open intervals.
5358     OpenIntervals.insert(I);
5359   }
5360 
5361   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5362     unsigned Invariant = 0;
5363     if (VFs[i] == 1)
5364       Invariant = LoopInvariants.size();
5365     else {
5366       for (auto Inst : LoopInvariants)
5367         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
5368     }
5369 
5370     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
5371     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
5372     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
5373 
5374     RU.LoopInvariantRegs = Invariant;
5375     RU.MaxLocalUsers = MaxUsages[i];
5376     RUs[i] = RU;
5377   }
5378 
5379   return RUs;
5380 }
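
// Worked example of GetRegUsage above (hypothetical target): with 128-bit
// vector registers, an i32 value at VF = 4 occupies max(1, 4 * 32 / 128) = 1
// register, while the same value at VF = 16 occupies 4. If at most five such
// intervals are simultaneously open, MaxLocalUsers for VF = 4 is 5.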
5381 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5383   // TODO: Cost model for emulated masked load/store is completely
5384   // broken. This hack guides the cost model to use an artificially
5385   // high enough value to practically disable vectorization with such
5386   // operations, except where previously deployed legality hack allowed
5387   // using very low cost values. This is to avoid regressions coming simply
5388   // from moving "masked load/store" check from legality to cost model.
5389   // Masked Load/Gather emulation was previously never allowed.
5390   // Limited number of Masked Store/Scatter emulation was allowed.
5391   assert(isScalarWithPredication(I) &&
5392          "Expecting a scalar emulated instruction");
5393   return isa<LoadInst>(I) ||
5394          (isa<StoreInst>(I) &&
5395           NumPredStores > NumberOfStoresToPredicate);
5396 }
5397 
5398 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5399   // If we aren't vectorizing the loop, or if we've already collected the
5400   // instructions to scalarize, there's nothing to do. Collection may already
5401   // have occurred if we have a user-selected VF and are now computing the
5402   // expected cost for interleaving.
5403   if (VF < 2 || InstsToScalarize.count(VF))
5404     return;
5405 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5407   // not profitable to scalarize any instructions, the presence of VF in the
5408   // map will indicate that we've analyzed it already.
5409   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5410 
  // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
5414   for (BasicBlock *BB : TheLoop->blocks()) {
5415     if (!Legal->blockNeedsPredication(BB))
5416       continue;
5417     for (Instruction &I : *BB)
5418       if (isScalarWithPredication(&I)) {
5419         ScalarCostsTy ScalarCosts;
5420         // Do not apply discount logic if hacked cost is needed
5421         // for emulated masked memrefs.
5422         if (!useEmulatedMaskMemRefHack(&I) &&
5423             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5424           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5425         // Remember that BB will remain after vectorization.
5426         PredicatedBBsAfterVectorization.insert(BB);
5427       }
5428   }
5429 }
5430 
5431 int LoopVectorizationCostModel::computePredInstDiscount(
5432     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5433     unsigned VF) {
5434   assert(!isUniformAfterVectorization(PredInst, VF) &&
5435          "Instruction marked uniform-after-vectorization will be predicated");
5436 
5437   // Initialize the discount to zero, meaning that the scalar version and the
5438   // vector version cost the same.
5439   int Discount = 0;
5440 
5441   // Holds instructions to analyze. The instructions we visit are mapped in
5442   // ScalarCosts. Those instructions are the ones that would be scalarized if
5443   // we find that the scalar version costs less.
5444   SmallVector<Instruction *, 8> Worklist;
5445 
5446   // Returns true if the given instruction can be scalarized.
5447   auto canBeScalarized = [&](Instruction *I) -> bool {
5448     // We only attempt to scalarize instructions forming a single-use chain
5449     // from the original predicated block that would otherwise be vectorized.
5450     // Although not strictly necessary, we give up on instructions we know will
5451     // already be scalar to avoid traversing chains that are unlikely to be
5452     // beneficial.
5453     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5454         isScalarAfterVectorization(I, VF))
5455       return false;
5456 
5457     // If the instruction is scalar with predication, it will be analyzed
5458     // separately. We ignore it within the context of PredInst.
5459     if (isScalarWithPredication(I))
5460       return false;
5461 
5462     // If any of the instruction's operands are uniform after vectorization,
5463     // the instruction cannot be scalarized. This prevents, for example, a
5464     // masked load from being scalarized.
5465     //
5466     // We assume we will only emit a value for lane zero of an instruction
5467     // marked uniform after vectorization, rather than VF identical values.
5468     // Thus, if we scalarize an instruction that uses a uniform, we would
5469     // create uses of values corresponding to the lanes we aren't emitting code
5470     // for. This behavior can be changed by allowing getScalarValue to clone
5471     // the lane zero values for uniforms rather than asserting.
5472     for (Use &U : I->operands())
5473       if (auto *J = dyn_cast<Instruction>(U.get()))
5474         if (isUniformAfterVectorization(J, VF))
5475           return false;
5476 
5477     // Otherwise, we can scalarize the instruction.
5478     return true;
5479   };
5480 
5481   // Returns true if an operand that cannot be scalarized must be extracted
5482   // from a vector. We will account for this scalarization overhead below. Note
5483   // that the non-void predicated instructions are placed in their own blocks,
5484   // and their return values are inserted into vectors. Thus, an extract would
5485   // still be required.
5486   auto needsExtract = [&](Instruction *I) -> bool {
5487     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
5488   };
5489 
5490   // Compute the expected cost discount from scalarizing the entire expression
5491   // feeding the predicated instruction. We currently only consider expressions
5492   // that are single-use instruction chains.
5493   Worklist.push_back(PredInst);
5494   while (!Worklist.empty()) {
5495     Instruction *I = Worklist.pop_back_val();
5496 
5497     // If we've already analyzed the instruction, there's nothing to do.
5498     if (ScalarCosts.count(I))
5499       continue;
5500 
5501     // Compute the cost of the vector instruction. Note that this cost already
5502     // includes the scalarization overhead of the predicated instruction.
5503     unsigned VectorCost = getInstructionCost(I, VF).first;
5504 
5505     // Compute the cost of the scalarized instruction. This cost is the cost of
5506     // the instruction as if it wasn't if-converted and instead remained in the
5507     // predicated block. We will scale this cost by block probability after
5508     // computing the scalarization overhead.
5509     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5510 
5511     // Compute the scalarization overhead of needed insertelement instructions
5512     // and phi nodes.
5513     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5514       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5515                                                  true, false);
5516       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5517     }
5518 
5519     // Compute the scalarization overhead of needed extractelement
5520     // instructions. For each of the instruction's operands, if the operand can
5521     // be scalarized, add it to the worklist; otherwise, account for the
5522     // overhead.
5523     for (Use &U : I->operands())
5524       if (auto *J = dyn_cast<Instruction>(U.get())) {
5525         assert(VectorType::isValidElementType(J->getType()) &&
5526                "Instruction has non-scalar type");
5527         if (canBeScalarized(J))
5528           Worklist.push_back(J);
5529         else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
5532       }
5533 
5534     // Scale the total scalar cost by block probability.
5535     ScalarCost /= getReciprocalPredBlockProb();
5536 
5537     // Compute the discount. A non-negative discount means the vector version
5538     // of the instruction costs more, and scalarizing would be beneficial.
5539     Discount += VectorCost - ScalarCost;
5540     ScalarCosts[I] = ScalarCost;
5541   }
5542 
5543   return Discount;
5544 }
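
// Worked example (hypothetical costs, VF = 4): suppose a chain instruction
// has a vector cost of 8, a scalar cost of 1, and one operand that needs a
// vector extract with overhead 2. Then ScalarCost = 4 * 1 + 2 = 6, scaled by
// an assumed block probability of 1/2 down to 3, so the discount grows by
// 8 - 3 = 5. A non-negative final total means scalarizing the chain is at
// least as cheap as keeping it vectorized.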
5545 
5546 LoopVectorizationCostModel::VectorizationCostTy
5547 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5548   VectorizationCostTy Cost;
5549 
5550   // For each block.
5551   for (BasicBlock *BB : TheLoop->blocks()) {
5552     VectorizationCostTy BlockCost;
5553 
5554     // For each instruction in the old loop.
5555     for (Instruction &I : BB->instructionsWithoutDebug()) {
5556       // Skip ignored values.
5557       if (ValuesToIgnore.count(&I) ||
5558           (VF > 1 && VecValuesToIgnore.count(&I)))
5559         continue;
5560 
5561       VectorizationCostTy C = getInstructionCost(&I, VF);
5562 
5563       // Check if we should override the cost.
5564       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5565         C.first = ForceTargetInstructionCost;
5566 
5567       BlockCost.first += C.first;
5568       BlockCost.second |= C.second;
5569       DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
5570                    << VF << " For instruction: " << I << '\n');
5571     }
5572 
5573     // If we are vectorizing a predicated block, it will have been
5574     // if-converted. This means that the block's instructions (aside from
5575     // stores and instructions that may divide by zero) will now be
5576     // unconditionally executed. For the scalar case, we may not always execute
5577     // the predicated block. Thus, scale the block's cost by the probability of
5578     // executing it.
5579     if (VF == 1 && Legal->blockNeedsPredication(BB))
5580       BlockCost.first /= getReciprocalPredBlockProb();
5581 
5582     Cost.first += BlockCost.first;
5583     Cost.second |= BlockCost.second;
5584   }
5585 
5586   return Cost;
5587 }
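
// Worked example of the block-probability scaling above: assuming
// getReciprocalPredBlockProb() returns 2 (i.e. a predicated block is presumed
// to execute on half of the iterations), a predicated block whose
// instructions sum to a cost of 10 contributes only 5 to the scalar (VF = 1)
// loop cost.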
5588 
5589 /// \brief Gets Address Access SCEV after verifying that the access pattern
5590 /// is loop invariant except the induction variable dependence.
5591 ///
5592 /// This SCEV can be sent to the Target in order to estimate the address
5593 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
5600   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5601   if (!Gep)
5602     return nullptr;
5603 
5604   // We are looking for a gep with all loop invariant indices except for one
5605   // which should be an induction variable.
5606   auto SE = PSE.getSE();
5607   unsigned NumOperands = Gep->getNumOperands();
5608   for (unsigned i = 1; i < NumOperands; ++i) {
5609     Value *Opd = Gep->getOperand(i);
5610     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5611         !Legal->isInductionVariable(Opd))
5612       return nullptr;
5613   }
5614 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return
  // the pointer's SCEV.
5616   return PSE.getSCEV(Ptr);
5617 }
5618 
5619 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5620   return Legal->hasStride(I->getOperand(0)) ||
5621          Legal->hasStride(I->getOperand(1));
5622 }
5623 
5624 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5625                                                                  unsigned VF) {
5626   Type *ValTy = getMemInstValueType(I);
5627   auto SE = PSE.getSE();
5628 
5629   unsigned Alignment = getMemInstAlignment(I);
5630   unsigned AS = getMemInstAddressSpace(I);
5631   Value *Ptr = getLoadStorePointerOperand(I);
5632   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5633 
  // Figure out whether the access is strided, and get the stride value
  // if it's known at compile time.
5636   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5637 
5638   // Get the cost of the scalar memory instruction and address computation.
5639   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5640 
5641   Cost += VF *
5642           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5643                               AS, I);
5644 
5645   // Get the overhead of the extractelement and insertelement instructions
5646   // we might create due to scalarization.
5647   Cost += getScalarizationOverhead(I, VF, TTI);
5648 
5649   // If we have a predicated store, it may not be executed for each vector
5650   // lane. Scale the cost by the probability of executing the predicated
5651   // block.
5652   if (isScalarWithPredication(I)) {
5653     Cost /= getReciprocalPredBlockProb();
5654 
5655     if (useEmulatedMaskMemRefHack(I))
5656       // Artificially setting to a high enough value to practically disable
5657       // vectorization with such operations.
5658       Cost = 3000000;
5659   }
5660 
5661   return Cost;
5662 }
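
// Worked example (hypothetical costs, VF = 4): with an address-computation
// cost of 1 and a scalar load cost of 1, the base cost is 4 * 1 + 4 * 1 = 8,
// plus the insert/extract scalarization overhead. If the access is
// predicated, the sum is divided by the block probability -- unless the
// emulated-masked-memref hack applies, in which case the cost is pinned at
// 3000000 to effectively forbid vectorization.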
5663 
5664 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5665                                                              unsigned VF) {
5666   Type *ValTy = getMemInstValueType(I);
5667   Type *VectorTy = ToVectorTy(ValTy, VF);
5668   unsigned Alignment = getMemInstAlignment(I);
5669   Value *Ptr = getLoadStorePointerOperand(I);
5670   unsigned AS = getMemInstAddressSpace(I);
5671   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5672 
5673   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5674          "Stride should be 1 or -1 for consecutive memory access");
5675   unsigned Cost = 0;
5676   if (Legal->isMaskRequired(I))
5677     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5678   else
5679     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5680 
5681   bool Reverse = ConsecutiveStride < 0;
5682   if (Reverse)
5683     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5684   return Cost;
5685 }
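
// Worked example: a reversed (stride -1) consecutive load of <4 x i32> costs
// the plain vector load plus one SK_Reverse shuffle to restore the original
// element order.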
5686 
5687 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5688                                                          unsigned VF) {
5689   LoadInst *LI = cast<LoadInst>(I);
5690   Type *ValTy = LI->getType();
5691   Type *VectorTy = ToVectorTy(ValTy, VF);
5692   unsigned Alignment = LI->getAlignment();
5693   unsigned AS = LI->getPointerAddressSpace();
5694 
5695   return TTI.getAddressComputationCost(ValTy) +
5696          TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5697          TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5698 }
5699 
5700 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5701                                                           unsigned VF) {
5702   Type *ValTy = getMemInstValueType(I);
5703   Type *VectorTy = ToVectorTy(ValTy, VF);
5704   unsigned Alignment = getMemInstAlignment(I);
5705   Value *Ptr = getLoadStorePointerOperand(I);
5706 
5707   return TTI.getAddressComputationCost(VectorTy) +
5708          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5709                                     Legal->isMaskRequired(I), Alignment);
5710 }
5711 
5712 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5713                                                             unsigned VF) {
5714   Type *ValTy = getMemInstValueType(I);
5715   Type *VectorTy = ToVectorTy(ValTy, VF);
5716   unsigned AS = getMemInstAddressSpace(I);
5717 
5718   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
5720 
5721   unsigned InterleaveFactor = Group->getFactor();
5722   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5723 
5724   // Holds the indices of existing members in an interleaved load group.
5725   // An interleaved store group doesn't need this as it doesn't allow gaps.
5726   SmallVector<unsigned, 4> Indices;
5727   if (isa<LoadInst>(I)) {
5728     for (unsigned i = 0; i < InterleaveFactor; i++)
5729       if (Group->getMember(i))
5730         Indices.push_back(i);
5731   }
5732 
5733   // Calculate the cost of the whole interleaved group.
5734   unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
5735                                                  Group->getFactor(), Indices,
5736                                                  Group->getAlignment(), AS);
5737 
5738   if (Group->isReverse())
5739     Cost += Group->getNumMembers() *
5740             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5741   return Cost;
5742 }
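
// Worked example: for a factor-2 load group with only member 0 present and
// VF = 4, WideVecTy is an 8-element vector and Indices = {0}, so the target
// prices one wide load plus the shuffles that extract every other element.
// A reversed group additionally pays one SK_Reverse shuffle per member.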
5743 
5744 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5745                                                               unsigned VF) {
  // Calculate the scalar cost only. The vectorization (widening) cost should
  // already have been computed by this point.
5748   if (VF == 1) {
5749     Type *ValTy = getMemInstValueType(I);
5750     unsigned Alignment = getMemInstAlignment(I);
5751     unsigned AS = getMemInstAddressSpace(I);
5752 
5753     return TTI.getAddressComputationCost(ValTy) +
5754            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5755   }
5756   return getWideningCost(I, VF);
5757 }
5758 
5759 LoopVectorizationCostModel::VectorizationCostTy
5760 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5761   // If we know that this instruction will remain uniform, check the cost of
5762   // the scalar version.
5763   if (isUniformAfterVectorization(I, VF))
5764     VF = 1;
5765 
5766   if (VF > 1 && isProfitableToScalarize(I, VF))
5767     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5768 
5769   // Forced scalars do not have any scalarization overhead.
5770   if (VF > 1 && ForcedScalars.count(VF) &&
5771       ForcedScalars.find(VF)->second.count(I))
5772     return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5773 
5774   Type *VectorTy;
5775   unsigned C = getInstructionCost(I, VF, VectorTy);
5776 
5777   bool TypeNotScalarized =
5778       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5779   return VectorizationCostTy(C, TypeNotScalarized);
5780 }
5781 
5782 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
5783   if (VF == 1)
5784     return;
5785   NumPredStores = 0;
5786   for (BasicBlock *BB : TheLoop->blocks()) {
5787     // For each instruction in the old loop.
5788     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
5790       if (!Ptr)
5791         continue;
5792 
5793       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
5794         NumPredStores++;
5795       if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
5796         // Scalar load + broadcast
5797         unsigned Cost = getUniformMemOpCost(&I, VF);
5798         setWideningDecision(&I, VF, CM_Scalarize, Cost);
5799         continue;
5800       }
5801 
5802       // We assume that widening is the best solution when possible.
5803       if (memoryInstructionCanBeWidened(&I, VF)) {
5804         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
5805         int ConsecutiveStride =
5806                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
5807         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5808                "Expected consecutive stride.");
5809         InstWidening Decision =
5810             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5811         setWideningDecision(&I, VF, Decision, Cost);
5812         continue;
5813       }
5814 
5815       // Choose between Interleaving, Gather/Scatter or Scalarization.
5816       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
5817       unsigned NumAccesses = 1;
5818       if (isAccessInterleaved(&I)) {
5819         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
5821 
5822         // Make one decision for the whole group.
5823         if (getWideningDecision(&I, VF) != CM_Unknown)
5824           continue;
5825 
5826         NumAccesses = Group->getNumMembers();
5827         InterleaveCost = getInterleaveGroupCost(&I, VF);
5828       }
5829 
5830       unsigned GatherScatterCost =
5831           isLegalGatherOrScatter(&I)
5832               ? getGatherScatterCost(&I, VF) * NumAccesses
5833               : std::numeric_limits<unsigned>::max();
5834 
5835       unsigned ScalarizationCost =
5836           getMemInstScalarizationCost(&I, VF) * NumAccesses;
5837 
5838       // Choose better solution for the current VF,
5839       // write down this decision and use it during vectorization.
5840       unsigned Cost;
5841       InstWidening Decision;
5842       if (InterleaveCost <= GatherScatterCost &&
5843           InterleaveCost < ScalarizationCost) {
5844         Decision = CM_Interleave;
5845         Cost = InterleaveCost;
5846       } else if (GatherScatterCost < ScalarizationCost) {
5847         Decision = CM_GatherScatter;
5848         Cost = GatherScatterCost;
5849       } else {
5850         Decision = CM_Scalarize;
5851         Cost = ScalarizationCost;
5852       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision and the same cost, though the cost will
      // actually be recorded on a single member.
5856       if (auto Group = getInterleavedAccessGroup(&I))
5857         setWideningDecision(Group, VF, Decision, Cost);
5858       else
5859         setWideningDecision(&I, VF, Decision, Cost);
5860     }
5861   }
5862 
5863   // Make sure that any load of address and any other address computation
5864   // remains scalar unless there is gather/scatter support. This avoids
5865   // inevitable extracts into address registers, and also has the benefit of
5866   // activating LSR more, since that pass can't optimize vectorized
5867   // addresses.
5868   if (TTI.prefersVectorizedAddressing())
5869     return;
5870 
5871   // Start with all scalar pointer uses.
5872   SmallPtrSet<Instruction *, 8> AddrDefs;
5873   for (BasicBlock *BB : TheLoop->blocks())
5874     for (Instruction &I : *BB) {
5875       Instruction *PtrDef =
5876         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5877       if (PtrDef && TheLoop->contains(PtrDef) &&
5878           getWideningDecision(&I, VF) != CM_GatherScatter)
5879         AddrDefs.insert(PtrDef);
5880     }
5881 
5882   // Add all instructions used to generate the addresses.
5883   SmallVector<Instruction *, 4> Worklist;
5884   for (auto *I : AddrDefs)
5885     Worklist.push_back(I);
5886   while (!Worklist.empty()) {
5887     Instruction *I = Worklist.pop_back_val();
5888     for (auto &Op : I->operands())
5889       if (auto *InstOp = dyn_cast<Instruction>(Op))
5890         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5891             AddrDefs.insert(InstOp).second)
5892           Worklist.push_back(InstOp);
5893   }
5894 
5895   for (auto *I : AddrDefs) {
5896     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
5901       InstWidening Decision = getWideningDecision(I, VF);
5902       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5903         // Scalarize a widened load of address.
5904         setWideningDecision(I, VF, CM_Scalarize,
5905                             (VF * getMemoryInstructionCost(I, 1)));
5906       else if (auto Group = getInterleavedAccessGroup(I)) {
5907         // Scalarize an interleave group of address loads.
5908         for (unsigned I = 0; I < Group->getFactor(); ++I) {
5909           if (Instruction *Member = Group->getMember(I))
5910             setWideningDecision(Member, VF, CM_Scalarize,
5911                                 (VF * getMemoryInstructionCost(Member, 1)));
5912         }
5913       }
5914     } else
5915       // Make sure I gets scalarized and a cost estimate without
5916       // scalarization overhead.
5917       ForcedScalars[VF].insert(I);
5918   }
5919 }
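
// Worked example of the three-way choice above (hypothetical costs, VF = 4):
// for an interleaved access with InterleaveCost = 12, GatherScatterCost = 20
// and ScalarizationCost = 16, CM_Interleave wins (12 <= 20 and 12 < 16). If
// the access were not part of an interleave group (InterleaveCost remaining
// UINT_MAX), GatherScatterCost = 20 would not beat ScalarizationCost = 16,
// and the access would be scalarized at cost 16.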
5920 
5921 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5922                                                         unsigned VF,
5923                                                         Type *&VectorTy) {
5924   Type *RetTy = I->getType();
5925   if (canTruncateToMinimalBitwidth(I, VF))
5926     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5927   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
5928   auto SE = PSE.getSE();
5929 
5930   // TODO: We need to estimate the cost of intrinsic calls.
5931   switch (I->getOpcode()) {
5932   case Instruction::GetElementPtr:
5933     // We mark this instruction as zero-cost because the cost of GEPs in
5934     // vectorized code depends on whether the corresponding memory instruction
5935     // is scalarized or not. Therefore, we handle GEPs with the memory
5936     // instruction cost.
5937     return 0;
5938   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
5942     bool ScalarPredicatedBB = false;
5943     BranchInst *BI = cast<BranchInst>(I);
5944     if (VF > 1 && BI->isConditional() &&
5945         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
5946          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
5947       ScalarPredicatedBB = true;
5948 
5949     if (ScalarPredicatedBB) {
5950       // Return cost for branches around scalarized and predicated blocks.
5951       Type *Vec_i1Ty =
5952           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
5953       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
5954               (TTI.getCFInstrCost(Instruction::Br) * VF));
5955     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
5956       // The back-edge branch will remain, as will all scalar branches.
5957       return TTI.getCFInstrCost(Instruction::Br);
5958     else
5959       // This branch will be eliminated by if-conversion.
5960       return 0;
5961     // Note: We currently assume zero cost for an unconditional branch inside
5962     // a predicated block since it will become a fall-through, although we
5963     // may decide in the future to call TTI for all branches.
5964   }
5965   case Instruction::PHI: {
5966     auto *Phi = cast<PHINode>(I);
5967 
5968     // First-order recurrences are replaced by vector shuffles inside the loop.
5969     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
5970       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5971                                 VectorTy, VF - 1, VectorTy);
5972 
5973     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
5974     // converted into select instructions. We require N - 1 selects per phi
5975     // node, where N is the number of incoming values.
5976     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
5977       return (Phi->getNumIncomingValues() - 1) *
5978              TTI.getCmpSelInstrCost(
5979                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
5980                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
5981 
5982     return TTI.getCFInstrCost(Instruction::PHI);
5983   }
5984   case Instruction::UDiv:
5985   case Instruction::SDiv:
5986   case Instruction::URem:
5987   case Instruction::SRem:
5988     // If we have a predicated instruction, it may not be executed for each
5989     // vector lane. Get the scalarization cost and scale this amount by the
5990     // probability of executing the predicated block. If the instruction is not
5991     // predicated, we fall through to the next case.
5992     if (VF > 1 && isScalarWithPredication(I)) {
5993       unsigned Cost = 0;
5994 
5995       // These instructions have a non-void type, so account for the phi nodes
5996       // that we will create. This cost is likely to be zero. The phi node
5997       // cost, if any, should be scaled by the block probability because it
5998       // models a copy at the end of each predicated block.
5999       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6000 
6001       // The cost of the non-predicated instruction.
6002       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
6003 
6004       // The cost of insertelement and extractelement instructions needed for
6005       // scalarization.
6006       Cost += getScalarizationOverhead(I, VF, TTI);
6007 
6008       // Scale the cost by the probability of executing the predicated blocks.
6009       // This assumes the predicated block for each vector lane is equally
6010       // likely.
6011       return Cost / getReciprocalPredBlockProb();
6012     }
6013     LLVM_FALLTHROUGH;
6014   case Instruction::Add:
6015   case Instruction::FAdd:
6016   case Instruction::Sub:
6017   case Instruction::FSub:
6018   case Instruction::Mul:
6019   case Instruction::FMul:
6020   case Instruction::FDiv:
6021   case Instruction::FRem:
6022   case Instruction::Shl:
6023   case Instruction::LShr:
6024   case Instruction::AShr:
6025   case Instruction::And:
6026   case Instruction::Or:
6027   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
6029     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6030       return 0;
6031     // Certain instructions can be cheaper to vectorize if they have a constant
6032     // second vector operand. One example of this are shifts on x86.
6033     TargetTransformInfo::OperandValueKind Op1VK =
6034         TargetTransformInfo::OK_AnyValue;
6035     TargetTransformInfo::OperandValueKind Op2VK =
6036         TargetTransformInfo::OK_AnyValue;
6037     TargetTransformInfo::OperandValueProperties Op1VP =
6038         TargetTransformInfo::OP_None;
6039     TargetTransformInfo::OperandValueProperties Op2VP =
6040         TargetTransformInfo::OP_None;
6041     Value *Op2 = I->getOperand(1);
6042 
    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
6049     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
6050       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
6051       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
6052       if (SplatValue) {
6053         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
6054         if (CInt && CInt->getValue().isPowerOf2())
6055           Op2VP = TargetTransformInfo::OP_PowerOf2;
6056         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
6057       }
6058     } else if (Legal->isUniform(Op2)) {
6059       Op2VK = TargetTransformInfo::OK_UniformValue;
6060     }
6061     SmallVector<const Value *, 4> Operands(I->operand_values());
6062     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6063     return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
6064                                           Op2VK, Op1VP, Op2VP, Operands);
6065   }
6066   case Instruction::Select: {
6067     SelectInst *SI = cast<SelectInst>(I);
6068     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6069     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6070     Type *CondTy = SI->getCondition()->getType();
6071     if (!ScalarCond)
6072       CondTy = VectorType::get(CondTy, VF);
6073 
6074     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
6075   }
6076   case Instruction::ICmp:
6077   case Instruction::FCmp: {
6078     Type *ValTy = I->getOperand(0)->getType();
6079     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6080     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6081       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6082     VectorTy = ToVectorTy(ValTy, VF);
6083     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
6084   }
6085   case Instruction::Store:
6086   case Instruction::Load: {
6087     unsigned Width = VF;
6088     if (Width > 1) {
6089       InstWidening Decision = getWideningDecision(I, Width);
6090       assert(Decision != CM_Unknown &&
6091              "CM decision should be taken at this point");
6092       if (Decision == CM_Scalarize)
6093         Width = 1;
6094     }
6095     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6096     return getMemoryInstructionCost(I, VF);
6097   }
6098   case Instruction::ZExt:
6099   case Instruction::SExt:
6100   case Instruction::FPToUI:
6101   case Instruction::FPToSI:
6102   case Instruction::FPExt:
6103   case Instruction::PtrToInt:
6104   case Instruction::IntToPtr:
6105   case Instruction::SIToFP:
6106   case Instruction::UIToFP:
6107   case Instruction::Trunc:
6108   case Instruction::FPTrunc:
6109   case Instruction::BitCast: {
6110     // We optimize the truncation of induction variables having constant
6111     // integer steps. The cost of these truncations is the same as the scalar
6112     // operation.
6113     if (isOptimizableIVTruncate(I, VF)) {
6114       auto *Trunc = cast<TruncInst>(I);
6115       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6116                                   Trunc->getSrcTy(), Trunc);
6117     }
6118 
6119     Type *SrcScalarTy = I->getOperand(0)->getType();
6120     Type *SrcVecTy =
6121         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6122     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
6125       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6126       //
6127       // Calculate the modified src and dest types.
6128       Type *MinVecTy = VectorTy;
6129       if (I->getOpcode() == Instruction::Trunc) {
6130         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6131         VectorTy =
6132             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6133       } else if (I->getOpcode() == Instruction::ZExt ||
6134                  I->getOpcode() == Instruction::SExt) {
6135         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6136         VectorTy =
6137             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6138       }
6139     }
6140 
6141     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6142     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6143   }
6144   case Instruction::Call: {
6145     bool NeedToScalarize;
6146     CallInst *CI = cast<CallInst>(I);
6147     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
6148     if (getVectorIntrinsicIDForCall(CI, TLI))
6149       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
6150     return CallCost;
6151   }
6152   default:
    // This opcode is unknown. Approximate the cost of executing VF copies of
    // the scalar instruction as that of a 'mul', plus the scalarization
    // overhead.
6155     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6156            getScalarizationOverhead(I, VF, TTI);
6157   } // end of switch.
6158 }
6159 
6160 char LoopVectorize::ID = 0;
6161 
6162 static const char lv_name[] = "Loop Vectorization";
6163 
6164 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6165 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6166 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6167 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6168 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6169 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6170 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6171 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6172 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6173 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6174 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6175 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6176 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6177 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6178 
6179 namespace llvm {
6180 
6181 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
6182   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
6183 }
6184 
6185 } // end namespace llvm
6186 
6187 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6188   // Check if the pointer operand of a load or store instruction is
6189   // consecutive.
6190   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6191     return Legal->isConsecutivePtr(Ptr);
6192   return false;
6193 }
6194 
6195 void LoopVectorizationCostModel::collectValuesToIgnore() {
6196   // Ignore ephemeral values.
6197   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6198 
6199   // Ignore type-promoting instructions we identified during reduction
6200   // detection.
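  // For example, for an i8 "add" reduction computed in i32, the extend and
  // truncate instructions that merely implement the type promotion are not
  // costed separately.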
6201   for (auto &Reduction : *Legal->getReductionVars()) {
6202     RecurrenceDescriptor &RedDes = Reduction.second;
6203     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6204     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6205   }
6206   // Ignore type-casting instructions we identified during induction
6207   // detection.
6208   for (auto &Induction : *Legal->getInductionVars()) {
6209     InductionDescriptor &IndDes = Induction.second;
6210     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6211     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6212   }
6213 }
6214 
6215 VectorizationFactor
6216 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize,
6217                                                 unsigned UserVF) {
  // Width 1 means no vectorization, cost 0 means uncomputed cost.
6219   const VectorizationFactor NoVectorization = {1U, 0U};
6220 
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
6225   if (!OrigLoop->empty()) {
6226     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6227     assert(UserVF && "Expected UserVF for outer loop vectorization.");
6228     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6229     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6230     buildVPlans(UserVF, UserVF);
6231 
6232     return {UserVF, 0};
6233   }
6234 
6235   DEBUG(dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6236                   "VPlan-native path.\n");
6237   return NoVectorization;
6238 }
6239 
6240 VectorizationFactor
6241 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
6242   assert(OrigLoop->empty() && "Inner loop expected.");
  // Width 1 means no vectorization, cost 0 means uncomputed cost.
6244   const VectorizationFactor NoVectorization = {1U, 0U};
6245   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
6246   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
6247     return NoVectorization;
6248 
6249   if (UserVF) {
6250     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6251     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6252     // Collect the instructions (and their associated costs) that will be more
6253     // profitable to scalarize.
6254     CM.selectUserVectorizationFactor(UserVF);
6255     buildVPlans(UserVF, UserVF);
6256     DEBUG(printPlans(dbgs()));
6257     return {UserVF, 0};
6258   }
6259 
6260   unsigned MaxVF = MaybeMaxVF.getValue();
6261   assert(MaxVF != 0 && "MaxVF is zero.");
6262 
6263   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6264     // Collect Uniform and Scalar instructions after vectorization with VF.
6265     CM.collectUniformsAndScalars(VF);
6266 
6267     // Collect the instructions (and their associated costs) that will be more
6268     // profitable to scalarize.
6269     if (VF > 1)
6270       CM.collectInstsToScalarize(VF);
6271   }
6272 
6273   buildVPlans(1, MaxVF);
6274   DEBUG(printPlans(dbgs()));
6275   if (MaxVF == 1)
6276     return NoVectorization;
6277 
6278   // Select the optimal vectorization factor.
6279   return CM.selectVectorizationFactor(MaxVF);
6280 }
6281 
6282 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6283   DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
6284   BestVF = VF;
6285   BestUF = UF;
6286 
6287   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6288     return !Plan->hasVF(VF);
6289   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6291 }
6292 
6293 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6294                                            DominatorTree *DT) {
6295   // Perform the actual loop transformation.
6296 
6297   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6298   VPCallbackILV CallbackILV(ILV);
6299 
6300   VPTransformState State{BestVF, BestUF,      LI,
6301                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6302                          &ILV,   CallbackILV};
6303   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6304 
6305   //===------------------------------------------------===//
6306   //
  // Notice: any optimization or new instruction that goes
6308   // into the code below should also be implemented in
6309   // the cost-model.
6310   //
6311   //===------------------------------------------------===//
6312 
6313   // 2. Copy and widen instructions from the old loop into the new loop.
6314   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6315   VPlans.front()->execute(&State);
6316 
6317   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6318   //    predication, updating analyses.
6319   ILV.fixVectorizedLoop();
6320 }
6321 
6322 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6323     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6324   BasicBlock *Latch = OrigLoop->getLoopLatch();
6325 
6326   // We create new control-flow for the vectorized loop, so the original
6327   // condition will be dead after vectorization if it's only used by the
6328   // branch.
6329   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6330   if (Cmp && Cmp->hasOneUse())
6331     DeadInstructions.insert(Cmp);
6332 
6333   // We create new "steps" for induction variable updates to which the original
6334   // induction variables map. An original update instruction will be dead if
6335   // all its users except the induction variable are dead.
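  // For example, given
  //   %i      = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
  //   %i.next = add nuw nsw i64 %i, 1
  // %i.next becomes dead once its only users are the phi and the (dead)
  // latch compare.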
6336   for (auto &Induction : *Legal->getInductionVars()) {
6337     PHINode *Ind = Induction.first;
6338     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6339     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6340           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
6341         }))
6342       DeadInstructions.insert(IndUpdate);
6343 
    // We also record as "Dead" the type-casting instructions we had identified
6345     // during induction analysis. We don't need any handling for them in the
6346     // vectorized loop because we have proven that, under a proper runtime
6347     // test guarding the vectorized loop, the value of the phi, and the casted
6348     // value of the phi, are the same. The last instruction in this casting chain
6349     // will get its scalar/vector/widened def from the scalar/vector/widened def
6350     // of the respective phi node. Any other casts in the induction def-use chain
6351     // have no other uses outside the phi update chain, and will be ignored.
6352     InductionDescriptor &IndDes = Induction.second;
6353     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6354     DeadInstructions.insert(Casts.begin(), Casts.end());
6355   }
6356 }
6357 
6358 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6359 
6360 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6361 
6362 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6363                                         Instruction::BinaryOps BinOp) {
6364   // When unrolling and the VF is 1, we only need to add a simple scalar.
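  // For example, with Val = %x and Step = %s, StartIdx values 0, 1, 2, ...
  // produce %x, %x + %s, %x + 2 * %s, and so on, one value per unrolled part.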
6365   Type *Ty = Val->getType();
6366   assert(!Ty->isVectorTy() && "Val must be a scalar");
6367 
6368   if (Ty->isFloatingPointTy()) {
6369     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6370 
6371     // Floating point operations had to be 'fast' to enable the unrolling.
6372     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6373     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6374   }
6375   Constant *C = ConstantInt::get(Ty, StartIdx);
6376   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6377 }
6378 
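/// Add metadata to \p L disabling runtime unrolling, unless unroll metadata
/// is already present. For example, the loop ends up carrying metadata of
/// the form:
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.unroll.runtime.disable"}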
6379 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6380   SmallVector<Metadata *, 4> MDs;
6381   // Reserve first location for self reference to the LoopID metadata node.
6382   MDs.push_back(nullptr);
6383   bool IsUnrollMetadata = false;
6384   MDNode *LoopID = L->getLoopID();
6385   if (LoopID) {
6386     // First find existing loop unrolling disable metadata.
6387     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6388       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6389       if (MD) {
6390         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6391         IsUnrollMetadata =
6392             S && S->getString().startswith("llvm.loop.unroll.disable");
6393       }
6394       MDs.push_back(LoopID->getOperand(i));
6395     }
6396   }
6397 
6398   if (!IsUnrollMetadata) {
6399     // Add runtime unroll disable metadata.
6400     LLVMContext &Context = L->getHeader()->getContext();
6401     SmallVector<Metadata *, 1> DisableOperands;
6402     DisableOperands.push_back(
6403         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6404     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6405     MDs.push_back(DisableNode);
6406     MDNode *NewLoopID = MDNode::get(Context, MDs);
6407     // Set operand 0 to refer to the loop id itself.
6408     NewLoopID->replaceOperandWith(0, NewLoopID);
6409     L->setLoopID(NewLoopID);
6410   }
6411 }
6412 
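/// Test \p Predicate on \p Range of VF's. Return the value of applying
/// \p Predicate to Range.Start, possibly clamping Range.End down so that the
/// returned value holds for the entire range. For example, if \p Range is
/// {2, 16} and \p Predicate holds for VF = 2 and 4 but not for VF = 8,
/// Range.End is clamped to 8 and true is returned.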
6413 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6414     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6415   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6416   bool PredicateAtRangeStart = Predicate(Range.Start);
6417 
6418   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6419     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6420       Range.End = TmpVF;
6421       break;
6422     }
6423 
6424   return PredicateAtRangeStart;
6425 }
6426 
6427 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6428 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6429 /// of VF's starting at a given VF and extending it as much as possible. Each
6430 /// vectorization decision can potentially shorten this sub-range during
6431 /// buildVPlan().
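/// For example, with MinVF = 1 and MaxVF = 8, if all decisions agree across
/// {1, 2, 4, 8} a single VPlan covers the whole range; if some decision
/// first changes at VF = 4, one VPlan is built for {1, 2} and another for
/// {4, 8}.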
6432 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6433 
6434   // Collect conditions feeding internal conditional branches; they need to be
6435   // represented in VPlan for it to model masking.
6436   SmallPtrSet<Value *, 1> NeedDef;
6437 
6438   auto *Latch = OrigLoop->getLoopLatch();
6439   for (BasicBlock *BB : OrigLoop->blocks()) {
6440     if (BB == Latch)
6441       continue;
6442     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
6443     if (Branch && Branch->isConditional())
6444       NeedDef.insert(Branch->getCondition());
6445   }
6446 
6447   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6448     VFRange SubRange = {VF, MaxVF + 1};
6449     VPlans.push_back(buildVPlan(SubRange, NeedDef));
6450     VF = SubRange.End;
6451   }
6452 }
6453 
6454 VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src,
6455                                                   BasicBlock *Dst,
6456                                                   VPlanPtr &Plan) {
6457   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6458 
6459   // Look for cached value.
6460   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6461   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6462   if (ECEntryIt != EdgeMaskCache.end())
6463     return ECEntryIt->second;
6464 
6465   VPValue *SrcMask = createBlockInMask(Src, Plan);
6466 
6467   // The terminator has to be a branch inst!
6468   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6469   assert(BI && "Unexpected terminator found");
6470 
6471   if (!BI->isConditional())
6472     return EdgeMaskCache[Edge] = SrcMask;
6473 
6474   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6475   assert(EdgeMask && "No Edge Mask found for condition");
6476 
6477   if (BI->getSuccessor(0) != Dst)
6478     EdgeMask = Builder.createNot(EdgeMask);
6479 
6480   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6481     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6482 
6483   return EdgeMaskCache[Edge] = EdgeMask;
6484 }
6485 
6486 VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB,
6487                                                      VPlanPtr &Plan) {
6488   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6489 
6490   // Look for cached value.
6491   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6492   if (BCEntryIt != BlockMaskCache.end())
6493     return BCEntryIt->second;
6494 
6495   // All-one mask is modelled as no-mask following the convention for masked
6496   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6497   VPValue *BlockMask = nullptr;
6498 
6499   // Loop incoming mask is all-one.
6500   if (OrigLoop->getHeader() == BB)
6501     return BlockMaskCache[BB] = BlockMask;
6502 
6503   // This is the block mask. We OR all incoming edges.
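  // For example, a block with two predecessors P0 and P1 gets the mask
  // EdgeMask(P0, BB) | EdgeMask(P1, BB), where each edge mask is the
  // predecessor's block mask ANDed with its branch condition (negated for
  // the false successor).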
6504   for (auto *Predecessor : predecessors(BB)) {
6505     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6506     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6507       return BlockMaskCache[BB] = EdgeMask;
6508 
6509     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6510       BlockMask = EdgeMask;
6511       continue;
6512     }
6513 
6514     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6515   }
6516 
6517   return BlockMaskCache[BB] = BlockMask;
6518 }
6519 
6520 VPInterleaveRecipe *
6521 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I,
6522                                                 VFRange &Range) {
6523   const InterleaveGroup *IG = CM.getInterleavedAccessGroup(I);
6524   if (!IG)
6525     return nullptr;
6526 
6527   // Now check if IG is relevant for VF's in the given range.
6528   auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
6529     return [=](unsigned VF) -> bool {
6530       return (VF >= 2 && // Query is illegal for VF == 1
6531               CM.getWideningDecision(I, VF) ==
6532                   LoopVectorizationCostModel::CM_Interleave);
6533     };
6534   };
6535   if (!getDecisionAndClampRange(isIGMember(I), Range))
6536     return nullptr;
6537 
6538   // I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
6539   // range. If it's the primary member of the IG construct a VPInterleaveRecipe.
6540   // Otherwise, it's an adjunct member of the IG, do not construct any Recipe.
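  // For example, in a factor-2 group of loads where the first load is the
  // insert position, the second load is an adjunct member whose code is
  // generated by the recipe of the first.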
6541   assert(I == IG->getInsertPos() &&
6542          "Generating a recipe for an adjunct member of an interleave group");
6543 
6544   return new VPInterleaveRecipe(IG);
6545 }
6546 
6547 VPWidenMemoryInstructionRecipe *
6548 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range,
6549                                            VPlanPtr &Plan) {
6550   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
6551     return nullptr;
6552 
6553   auto willWiden = [&](unsigned VF) -> bool {
6554     if (VF == 1)
6555       return false;
6556     if (CM.isScalarAfterVectorization(I, VF) ||
6557         CM.isProfitableToScalarize(I, VF))
6558       return false;
6559     LoopVectorizationCostModel::InstWidening Decision =
6560         CM.getWideningDecision(I, VF);
6561     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6562            "CM decision should be taken at this point.");
6563     assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
6564            "Interleave memory opportunity should be caught earlier.");
6565     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6566   };
6567 
6568   if (!getDecisionAndClampRange(willWiden, Range))
6569     return nullptr;
6570 
6571   VPValue *Mask = nullptr;
6572   if (Legal->isMaskRequired(I))
6573     Mask = createBlockInMask(I->getParent(), Plan);
6574 
6575   return new VPWidenMemoryInstructionRecipe(*I, Mask);
6576 }
6577 
6578 VPWidenIntOrFpInductionRecipe *
6579 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I,
6580                                                  VFRange &Range) {
6581   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
6582     // Check if this is an integer or fp induction. If so, build the recipe that
6583     // produces its scalar and vector values.
6584     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
6585     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6586         II.getKind() == InductionDescriptor::IK_FpInduction)
6587       return new VPWidenIntOrFpInductionRecipe(Phi);
6588 
6589     return nullptr;
6590   }
6591 
6592   // Optimize the special case where the source is a constant integer
6593   // induction variable. Notice that we can only optimize the 'trunc' case
6594   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6595   // (c) other casts depend on pointer size.
6596 
6597   // Determine whether \p K is a truncation based on an induction variable that
6598   // can be optimized.
6599   auto isOptimizableIVTruncate =
6600       [&](Instruction *K) -> std::function<bool(unsigned)> {
6601     return
6602         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6603   };
6604 
6605   if (isa<TruncInst>(I) &&
6606       getDecisionAndClampRange(isOptimizableIVTruncate(I), Range))
6607     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6608                                              cast<TruncInst>(I));
6609   return nullptr;
6610 }
6611 
6612 VPBlendRecipe *
6613 LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) {
6614   PHINode *Phi = dyn_cast<PHINode>(I);
6615   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
6616     return nullptr;
6617 
6618   // We know that all PHIs in non-header blocks are converted into selects, so
6619   // we don't have to worry about the insertion order and we can just use the
6620   // builder. At this point we generate the predication tree. There may be
6621   // duplications since this is a simple recursive scan, but future
6622   // optimizations will clean it up.
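  // For example, a phi with incoming values In0, In1, In2 becomes
  //   select(Mask2, In2, select(Mask1, In1, In0))
  // where MaskN is the mask of the phi's N'th incoming edge.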
6623 
6624   SmallVector<VPValue *, 2> Masks;
6625   unsigned NumIncoming = Phi->getNumIncomingValues();
6626   for (unsigned In = 0; In < NumIncoming; In++) {
6627     VPValue *EdgeMask =
6628       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6629     assert((EdgeMask || NumIncoming == 1) &&
6630            "Multiple predecessors with one having a full mask");
6631     if (EdgeMask)
6632       Masks.push_back(EdgeMask);
6633   }
6634   return new VPBlendRecipe(Phi, Masks);
6635 }
6636 
6637 bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
6638                                           VFRange &Range) {
6639   if (CM.isScalarWithPredication(I))
6640     return false;
6641 
6642   auto IsVectorizableOpcode = [](unsigned Opcode) {
6643     switch (Opcode) {
6644     case Instruction::Add:
6645     case Instruction::And:
6646     case Instruction::AShr:
6647     case Instruction::BitCast:
6648     case Instruction::Br:
6649     case Instruction::Call:
6650     case Instruction::FAdd:
6651     case Instruction::FCmp:
6652     case Instruction::FDiv:
6653     case Instruction::FMul:
6654     case Instruction::FPExt:
6655     case Instruction::FPToSI:
6656     case Instruction::FPToUI:
6657     case Instruction::FPTrunc:
6658     case Instruction::FRem:
6659     case Instruction::FSub:
6660     case Instruction::GetElementPtr:
6661     case Instruction::ICmp:
6662     case Instruction::IntToPtr:
6663     case Instruction::Load:
6664     case Instruction::LShr:
6665     case Instruction::Mul:
6666     case Instruction::Or:
6667     case Instruction::PHI:
6668     case Instruction::PtrToInt:
6669     case Instruction::SDiv:
6670     case Instruction::Select:
6671     case Instruction::SExt:
6672     case Instruction::Shl:
6673     case Instruction::SIToFP:
6674     case Instruction::SRem:
6675     case Instruction::Store:
6676     case Instruction::Sub:
6677     case Instruction::Trunc:
6678     case Instruction::UDiv:
6679     case Instruction::UIToFP:
6680     case Instruction::URem:
6681     case Instruction::Xor:
6682     case Instruction::ZExt:
6683       return true;
6684     }
6685     return false;
6686   };
6687 
6688   if (!IsVectorizableOpcode(I->getOpcode()))
6689     return false;
6690 
6691   if (CallInst *CI = dyn_cast<CallInst>(I)) {
6692     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6693     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6694                ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6695       return false;
6696   }
6697 
6698   auto willWiden = [&](unsigned VF) -> bool {
6699     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
6700                              CM.isProfitableToScalarize(I, VF)))
6701       return false;
6702     if (CallInst *CI = dyn_cast<CallInst>(I)) {
6703       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      // The following case may be scalarized depending on the VF.
      // The flag shows whether we use an intrinsic or a plain call for the
      // vectorized version of the instruction, i.e. whether an intrinsic
      // call is more beneficial than a library call.
6708       bool NeedToScalarize;
6709       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
6710       bool UseVectorIntrinsic =
6711           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
6712       return UseVectorIntrinsic || !NeedToScalarize;
6713     }
6714     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
6715       assert(CM.getWideningDecision(I, VF) ==
6716                  LoopVectorizationCostModel::CM_Scalarize &&
6717              "Memory widening decisions should have been taken care by now");
6718       return false;
6719     }
6720     return true;
6721   };
6722 
6723   if (!getDecisionAndClampRange(willWiden, Range))
6724     return false;
6725 
6726   // Success: widen this instruction. We optimize the common case where
6727   // consecutive instructions can be represented by a single recipe.
6728   if (!VPBB->empty()) {
6729     VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
6730     if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
6731       return true;
6732   }
6733 
6734   VPBB->appendRecipe(new VPWidenRecipe(I));
6735   return true;
6736 }
6737 
6738 VPBasicBlock *LoopVectorizationPlanner::handleReplication(
6739     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
6740     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
6741     VPlanPtr &Plan) {
6742   bool IsUniform = getDecisionAndClampRange(
6743       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
6744       Range);
6745 
6746   bool IsPredicated = CM.isScalarWithPredication(I);
6747   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
6748 
6749   // Find if I uses a predicated instruction. If so, it will use its scalar
6750   // value. Avoid hoisting the insert-element which packs the scalar value into
6751   // a vector value, as that happens iff all users use the vector value.
6752   for (auto &Op : I->operands())
6753     if (auto *PredInst = dyn_cast<Instruction>(Op))
6754       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
6755         PredInst2Recipe[PredInst]->setAlsoPack(false);
6756 
6757   // Finalize the recipe for Instr, first if it is not predicated.
6758   if (!IsPredicated) {
6759     DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6760     VPBB->appendRecipe(Recipe);
6761     return VPBB;
6762   }
6763   DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6764   assert(VPBB->getSuccessors().empty() &&
6765          "VPBB has successors when handling predicated replication.");
6766   // Record predicated instructions for above packing optimizations.
6767   PredInst2Recipe[I] = Recipe;
6768   VPBlockBase *Region =
6769     VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan));
6770   return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock()));
6771 }
6772 
6773 VPRegionBlock *
6774 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr,
6775                                                 VPRecipeBase *PredRecipe,
6776                                                 VPlanPtr &Plan) {
6777   // Instructions marked for predication are replicated and placed under an
6778   // if-then construct to prevent side-effects.
6779 
6780   // Generate recipes to compute the block mask for this region.
6781   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
6782 
6783   // Build the triangular if-then region.
6784   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
6785   assert(Instr->getParent() && "Predicated instruction not in any basic block");
6786   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
6787   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
6788   auto *PHIRecipe =
6789       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
6790   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
6791   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
6792   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
6793 
6794   // Note: first set Entry as region entry and then connect successors starting
6795   // from it in order, to propagate the "parent" of each VPBasicBlock.
6796   Entry->setTwoSuccessors(Pred, Exit);
6797   Pred->setOneSuccessor(Exit);
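  // The region now forms a triangle: Entry conditionally branches to Pred,
  // which holds the replicated recipe, or directly to Exit, and Pred falls
  // through to Exit.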
6798 
6799   return Region;
6800 }
6801 
6802 LoopVectorizationPlanner::VPlanPtr
6803 LoopVectorizationPlanner::buildVPlan(VFRange &Range,
6804                                      const SmallPtrSetImpl<Value *> &NeedDef) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
6809   if (!OrigLoop->empty()) {
6810     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6811 
    // Create a new empty VPlan.
6813     auto Plan = llvm::make_unique<VPlan>();
6814     return Plan;
6815   }
6816 
6817   assert(OrigLoop->empty() && "Inner loop expected.");
6818   EdgeMaskCache.clear();
6819   BlockMaskCache.clear();
6820   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
6821   DenseMap<Instruction *, Instruction *> SinkAfterInverse;
6822 
6823   // Collect instructions from the original loop that will become trivially dead
6824   // in the vectorized loop. We don't need to vectorize these instructions. For
6825   // example, original induction update instructions can become dead because we
6826   // separately emit induction "steps" when generating code for the new loop.
6827   // Similarly, we create a new latch condition when setting up the structure
6828   // of the new loop, so the old one can become dead.
6829   SmallPtrSet<Instruction *, 4> DeadInstructions;
6830   collectTriviallyDeadInstructions(DeadInstructions);
6831 
6832   // Hold a mapping from predicated instructions to their recipes, in order to
6833   // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of a vector value.
6835   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
6836 
6837   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
6838   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
6839   auto Plan = llvm::make_unique<VPlan>(VPBB);
6840 
6841   // Represent values that will have defs inside VPlan.
6842   for (Value *V : NeedDef)
6843     Plan->addVPValue(V);
6844 
6845   // Scan the body of the loop in a topological order to visit each basic block
6846   // after having visited its predecessor basic blocks.
6847   LoopBlocksDFS DFS(OrigLoop);
6848   DFS.perform(LI);
6849 
6850   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6851     // Relevant instructions from basic block BB will be grouped into VPRecipe
6852     // ingredients and fill a new VPBasicBlock.
6853     unsigned VPBBsForBB = 0;
6854     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
6855     VPBB->setOneSuccessor(FirstVPBBForBB);
6856     VPBB = FirstVPBBForBB;
6857     Builder.setInsertPoint(VPBB);
6858 
6859     std::vector<Instruction *> Ingredients;
6860 
6861     // Organize the ingredients to vectorize from current basic block in the
6862     // right order.
6863     for (Instruction &I : BB->instructionsWithoutDebug()) {
6864       Instruction *Instr = &I;
6865 
6866       // First filter out irrelevant instructions, to ensure no recipes are
6867       // built for them.
6868       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
6869         continue;
6870 
      // If Instr is a member of an InterleaveGroup for Range.Start and is an
      // adjunct (non insert-position) member of the IG, do not construct any
      // Recipe for it.
6873       const InterleaveGroup *IG = CM.getInterleavedAccessGroup(Instr);
6874       if (IG && Instr != IG->getInsertPos() &&
6875           Range.Start >= 2 && // Query is illegal for VF == 1
6876           CM.getWideningDecision(Instr, Range.Start) ==
6877               LoopVectorizationCostModel::CM_Interleave) {
6878         if (SinkAfterInverse.count(Instr))
6879           Ingredients.push_back(SinkAfterInverse.find(Instr)->second);
6880         continue;
6881       }
6882 
6883       // Move instructions to handle first-order recurrences, step 1: avoid
6884       // handling this instruction until after we've handled the instruction it
6885       // should follow.
6886       auto SAIt = SinkAfter.find(Instr);
6887       if (SAIt != SinkAfter.end()) {
6888         DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second
6889                      << " to vectorize a 1st order recurrence.\n");
6890         SinkAfterInverse[SAIt->second] = Instr;
6891         continue;
6892       }
6893 
6894       Ingredients.push_back(Instr);
6895 
6896       // Move instructions to handle first-order recurrences, step 2: push the
6897       // instruction to be sunk at its insertion point.
6898       auto SAInvIt = SinkAfterInverse.find(Instr);
6899       if (SAInvIt != SinkAfterInverse.end())
6900         Ingredients.push_back(SAInvIt->second);
6901     }
6902 
6903     // Introduce each ingredient into VPlan.
6904     for (Instruction *Instr : Ingredients) {
6905       VPRecipeBase *Recipe = nullptr;
6906 
6907       // Check if Instr should belong to an interleave memory recipe, or already
6908       // does. In the latter case Instr is irrelevant.
6909       if ((Recipe = tryToInterleaveMemory(Instr, Range))) {
6910         VPBB->appendRecipe(Recipe);
6911         continue;
6912       }
6913 
6914       // Check if Instr is a memory operation that should be widened.
6915       if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
6916         VPBB->appendRecipe(Recipe);
6917         continue;
6918       }
6919 
6920       // Check if Instr should form some PHI recipe.
6921       if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
6922         VPBB->appendRecipe(Recipe);
6923         continue;
6924       }
6925       if ((Recipe = tryToBlend(Instr, Plan))) {
6926         VPBB->appendRecipe(Recipe);
6927         continue;
6928       }
6929       if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
6930         VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
6931         continue;
6932       }
6933 
6934       // Check if Instr is to be widened by a general VPWidenRecipe, after
6935       // having first checked for specific widening recipes that deal with
6936       // Interleave Groups, Inductions and Phi nodes.
6937       if (tryToWiden(Instr, VPBB, Range))
6938         continue;
6939 
6940       // Otherwise, if all widening options failed, Instruction is to be
6941       // replicated. This may create a successor for VPBB.
6942       VPBasicBlock *NextVPBB =
6943         handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan);
6944       if (NextVPBB != VPBB) {
6945         VPBB = NextVPBB;
6946         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
6947                                     : "");
6948       }
6949     }
6950   }
6951 
6952   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
6954   // basic-blocks with no recipes.
6955   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
6956   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
6957   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
6958   PreEntry->disconnectSuccessor(Entry);
6959   delete PreEntry;
6960 
6961   std::string PlanName;
6962   raw_string_ostream RSO(PlanName);
6963   unsigned VF = Range.Start;
6964   Plan->addVF(VF);
6965   RSO << "Initial VPlan for VF={" << VF;
6966   for (VF *= 2; VF < Range.End; VF *= 2) {
6967     Plan->addVF(VF);
6968     RSO << "," << VF;
6969   }
6970   RSO << "},UF>=1";
6971   RSO.flush();
6972   Plan->setName(PlanName);
6973 
6974   return Plan;
6975 }
6976 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
6981 
6982 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
6983   O << " +\n"
6984     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
6985   IG->getInsertPos()->printAsOperand(O, false);
6986   O << "\\l\"";
6987   for (unsigned i = 0; i < IG->getFactor(); ++i)
6988     if (Instruction *I = IG->getMember(i))
6989       O << " +\n"
6990         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
6991 }
6992 
6993 void VPWidenRecipe::execute(VPTransformState &State) {
6994   for (auto &Instr : make_range(Begin, End))
6995     State.ILV->widenInstruction(Instr);
6996 }
6997 
6998 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
6999   assert(!State.Instance && "Int or FP induction being replicated.");
7000   State.ILV->widenIntOrFpInduction(IV, Trunc);
7001 }
7002 
7003 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7004   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7005 }
7006 
7007 void VPBlendRecipe::execute(VPTransformState &State) {
7008   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7009   // We know that all PHIs in non-header blocks are converted into
7010   // selects, so we don't have to worry about the insertion order and we
7011   // can just use the builder.
7012   // At this point we generate the predication tree. There may be
7013   // duplications since this is a simple recursive scan, but future
7014   // optimizations will clean it up.
7015 
7016   unsigned NumIncoming = Phi->getNumIncomingValues();
7017 
  assert((User || NumIncoming == 1) &&
         "Multiple predecessors with one having a full mask");
7020   // Generate a sequence of selects of the form:
7021   // SELECT(Mask3, In3,
7022   //      SELECT(Mask2, In2,
7023   //                   ( ...)))
7024   InnerLoopVectorizer::VectorParts Entry(State.UF);
7025   for (unsigned In = 0; In < NumIncoming; ++In) {
7026     for (unsigned Part = 0; Part < State.UF; ++Part) {
7027       // We might have single edge PHIs (blocks) - use an identity
7028       // 'select' for the first PHI operand.
7029       Value *In0 =
7030           State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
7031       if (In == 0)
7032         Entry[Part] = In0; // Initialize with the first incoming value.
7033       else {
7034         // Select between the current value and the previous incoming edge
7035         // based on the incoming mask.
7036         Value *Cond = State.get(User->getOperand(In), Part);
7037         Entry[Part] =
7038             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7039       }
7040     }
7041   }
7042   for (unsigned Part = 0; Part < State.UF; ++Part)
7043     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7044 }
7045 
7046 void VPInterleaveRecipe::execute(VPTransformState &State) {
7047   assert(!State.Instance && "Interleave group being replicated.");
7048   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
7049 }
7050 
7051 void VPReplicateRecipe::execute(VPTransformState &State) {
7052   if (State.Instance) { // Generate a single instance.
7053     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7054     // Insert scalar instance packing it into a vector.
7055     if (AlsoPack && State.VF > 1) {
7056       // If we're constructing lane 0, initialize to start from undef.
7057       if (State.Instance->Lane == 0) {
7058         Value *Undef =
7059             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7060         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7061       }
7062       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7063     }
7064     return;
7065   }
7066 
7067   // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane of each
7069   // of the UF parts.
7070   unsigned EndLane = IsUniform ? 1 : State.VF;
7071   for (unsigned Part = 0; Part < State.UF; ++Part)
7072     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7073       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7074 }
7075 
7076 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7077   assert(State.Instance && "Branch on Mask works only on single instance.");
7078 
7079   unsigned Part = State.Instance->Part;
7080   unsigned Lane = State.Instance->Lane;
7081 
7082   Value *ConditionBit = nullptr;
7083   if (!User) // Block in mask is all-one.
7084     ConditionBit = State.Builder.getTrue();
7085   else {
7086     VPValue *BlockInMask = User->getOperand(0);
7087     ConditionBit = State.get(BlockInMask, Part);
7088     if (ConditionBit->getType()->isVectorTy())
7089       ConditionBit = State.Builder.CreateExtractElement(
7090           ConditionBit, State.Builder.getInt32(Lane));
7091   }
7092 
7093   // Replace the temporary unreachable terminator with a new conditional branch,
7094   // whose two destinations will be set later when they are created.
7095   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7096   assert(isa<UnreachableInst>(CurrentTerminator) &&
7097          "Expected to replace unreachable terminator with conditional branch.");
7098   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7099   CondBr->setSuccessor(0, nullptr);
7100   ReplaceInstWithInst(CurrentTerminator, CondBr);
7101 }
7102 
7103 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7104   assert(State.Instance && "Predicated instruction PHI works per instance.");
7105   Instruction *ScalarPredInst = cast<Instruction>(
7106       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7107   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7108   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7109   assert(PredicatingBB && "Predicated block has no single predecessor.");
7110 
7111   // By current pack/unpack logic we need to generate only a single phi node: if
7112   // a vector value for the predicated instruction exists at this point it means
7113   // the instruction has vector users only, and a phi for the vector value is
7114   // needed. In this case the recipe of the predicated instruction is marked to
7115   // also do that packing, thereby "hoisting" the insert-element sequence.
7116   // Otherwise, a phi node for the scalar value is needed.
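  // For example, if the vector value exists it is the insert-element emitted
  // in the predicated block, and the phi blends the unmodified incoming
  // vector with it; otherwise the phi blends undef with the scalar result.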
7117   unsigned Part = State.Instance->Part;
7118   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7119     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7120     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7121     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7122     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7123     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7124     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7125   } else {
7126     Type *PredInstType = PredInst->getType();
7127     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7128     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7129     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7130     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7131   }
7132 }
7133 
7134 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7135   if (!User)
7136     return State.ILV->vectorizeMemoryInstruction(&Instr);
7137 
7138   // Last (and currently only) operand is a mask.
7139   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
7140   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
7141   for (unsigned Part = 0; Part < State.UF; ++Part)
7142     MaskValues[Part] = State.get(Mask, Part);
7143   State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
7144 }
7145 
7146 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
7148 // VPlan-to-VPlan transformations from the very beginning without modifying the
7149 // input LLVM IR.
7150 static bool processLoopInVPlanNativePath(
7151     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7152     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7153     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7154     OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) {
7155 
7156   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7157   Function *F = L->getHeader()->getParent();
7158   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7159   LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7160                                 &Hints, IAI);
7161   // Use the planner for outer loop vectorization.
7162   // TODO: CM is not used at this point inside the planner. Turn CM into an
7163   // optional argument if we don't need it in the future.
7164   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);
7165 
7166   // Get user vectorization factor.
7167   unsigned UserVF = Hints.getWidth();
7168 
7169   // Check the function attributes to find out if this function should be
7170   // optimized for size.
7171   bool OptForSize =
7172       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7173 
7174   // Plan how to best vectorize, return the best VF and its cost.
7175   LVP.planInVPlanNativePath(OptForSize, UserVF);
7176 
  // Return false. We currently do not generate vector code in the VPlan-native
  // path.
7179   return false;
7180 }
7181 
7182 bool LoopVectorizePass::processLoop(Loop *L) {
7183   assert((EnableVPlanNativePath || L->empty()) &&
7184          "VPlan-native path is not enabled. Only process inner loops.");
7185 
7186 #ifndef NDEBUG
7187   const std::string DebugLocStr = getDebugLocString(L);
7188 #endif /* NDEBUG */
7189 
7190   DEBUG(dbgs() << "\nLV: Checking a loop in \""
7191                << L->getHeader()->getParent()->getName() << "\" from "
7192                << DebugLocStr << "\n");
7193 
7194   LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
7195 
7196   DEBUG(dbgs() << "LV: Loop hints:"
7197                << " force="
7198                << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7199                        ? "disabled"
7200                        : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7201                               ? "enabled"
7202                               : "?"))
7203                << " width=" << Hints.getWidth()
7204                << " unroll=" << Hints.getInterleave() << "\n");
7205 
7206   // Function containing loop
7207   Function *F = L->getHeader()->getParent();
7208 
7209   // Looking at the diagnostic output is the only way to determine if a loop
7210   // was vectorized (other than looking at the IR or machine code), so it
7211   // is important to generate an optimization remark for each loop. Most of
7212   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7213   // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
7215   // benefit from vectorization, respectively.
7216 
7217   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
7218     DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7219     return false;
7220   }
7221 
7222   PredicatedScalarEvolution PSE(*SE, *L);
7223 
7224   // Check if it is legal to vectorize the loop.
7225   LoopVectorizationRequirements Requirements(*ORE);
7226   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
7227                                 &Requirements, &Hints, DB, AC);
7228   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7229     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7230     emitMissedWarning(F, L, Hints, ORE);
7231     return false;
7232   }
7233 
7234   // Check the function attributes to find out if this function should be
7235   // optimized for size.
7236   bool OptForSize =
7237       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7238 
7239   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7240   // here. They may require CFG and instruction level transformations before
7241   // even evaluating whether vectorization is profitable. Since we cannot modify
7242   // the incoming IR, we need to build VPlan upfront in the vectorization
7243   // pipeline.
7244   if (!L->empty())
7245     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7246                                         ORE, Hints);
7247 
7248   assert(L->empty() && "Inner loop expected.");
7249   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7250   // count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count over profile data, and profile data over the
  // upper-bound estimate.
7252   unsigned ExpectedTC = 0;
7253   bool HasExpectedTC = false;
7254   if (const SCEVConstant *ConstExits =
7255       dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
7256     const APInt &ExitsCount = ConstExits->getAPInt();
7257     // We are interested in small values for ExpectedTC. Skip over those that
7258     // can't fit an unsigned.
7259     if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
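      // The trip count is the backedge-taken count plus one.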
7260       ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
7261       HasExpectedTC = true;
7262     }
7263   }
7264   // ExpectedTC may be large because it's bound by a variable. Check
7265   // profiling information to validate we should vectorize.
7266   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
7267     auto EstimatedTC = getLoopEstimatedTripCount(L);
7268     if (EstimatedTC) {
7269       ExpectedTC = *EstimatedTC;
7270       HasExpectedTC = true;
7271     }
7272   }
7273   if (!HasExpectedTC) {
7274     ExpectedTC = SE->getSmallConstantMaxTripCount(L);
7275     HasExpectedTC = (ExpectedTC > 0);
7276   }
7277 
7278   if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
7279     DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7280                  << "This loop is worth vectorizing only if no scalar "
7281                  << "iteration overheads are incurred.");
7282     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7283       DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7284     else {
7285       DEBUG(dbgs() << "\n");
7286       // Loops with a very small trip count are considered for vectorization
7287       // under OptForSize, thereby making sure the cost of their loop body is
7288       // dominant, free of runtime guards and scalar iteration overheads.
7289       OptForSize = true;
7290     }
7291   }
7292 
7293   // Check the function attributes to see if implicit floats are allowed.
7294   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7295   // an integer loop and the vector instructions selected are purely integer
7296   // vector instructions?
7297   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                    " attribute is used.\n");
7300     ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
7301                                      "NoImplicitFloat", L)
7302               << "loop not vectorized due to NoImplicitFloat attribute");
7303     emitMissedWarning(F, L, Hints, ORE);
7304     return false;
7305   }
7306 
7307   // Check if the target supports potentially unsafe FP vectorization.
7308   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7309   // for the target we're vectorizing for, to make sure none of the
7310   // additional fp-math flags can help.
7311   if (Hints.isPotentiallyUnsafe() &&
7312       TTI->isFPVectorizationPotentiallyUnsafe()) {
7313     DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
7314     ORE->emit(
7315         createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
7316         << "loop not vectorized due to unsafe FP support.");
7317     emitMissedWarning(F, L, Hints, ORE);
7318     return false;
7319   }
7320 
7321   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7322   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7323 
7324   // If an override option has been passed in for interleaved accesses, use it.
7325   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7326     UseInterleaved = EnableInterleavedMemAccesses;
7327 
7328   // Analyze interleaved memory accesses.
7329   if (UseInterleaved) {
7330     IAI.analyzeInterleaving();
7331   }
7332 
7333   // Use the cost model.
7334   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
7335                                 &Hints, IAI);
7336   CM.collectValuesToIgnore();
7337 
7338   // Use the planner for vectorization.
7339   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
7340 
7341   // Get user vectorization factor.
7342   unsigned UserVF = Hints.getWidth();
7343 
7344   // Plan how to best vectorize, return the best VF and its cost.
7345   VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;
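
  // Likewise, a nonzero UserIC usually originates from
  // "llvm.loop.interleave.count" loop metadata (e.g. emitted by clang for
  // "#pragma clang loop interleave_count(2)"); the pragma is mentioned only
  // as an illustrative source of that metadata.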

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleave count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *profitable* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);
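
    // Concretely, this attaches the "llvm.loop.unroll.runtime.disable"
    // property to the scalar loop's metadata, roughly:
    //
    //   !llvm.loop !0
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
    //
    // (Illustrative IR; the exact node layout is produced by
    // AddRuntimeUnrollDisableMetaData.)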

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleave count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
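
  // Concretely, setAlreadyVectorized records the decision in the loop's
  // metadata (the "llvm.loop.isvectorized" hint), so a later run of the
  // vectorizer recognizes the loop and does not widen it a second time.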

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;
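
  // As an illustrative example: a target with no vector registers but with
  // getMaxInterleaveFactor(1) >= 2 can still profit from rewriting a
  // reduction to use two scalar accumulators per iteration, shortening the
  // loop-carried dependence chain and exposing instruction-level parallelism.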

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
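
  // "Simplified form" is loop-simplify form: a preheader, a single backedge
  // (one latch), and dedicated exit blocks; see simplifyLoop.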

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Every loop nest in the function has now been processed.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}