1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
11 // and generates target-independent LLVM-IR.
12 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
13 // of instructions in order to estimate the profitability of vectorization.
14 //
15 // The loop vectorizer combines consecutive loop iterations into a single
16 // 'wide' iteration. After this transformation the index is incremented
17 // by the SIMD vector width, and not by one.
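//
// For illustration only (a sketch, not taken from any particular benchmark),
// a scalar loop such as:
//
//   for (i = 0; i < n; i++)
//     C[i] = A[i] + B[i];
//
// is conceptually turned, for a vectorization factor of 4, into:
//
//   for (i = 0; i < n; i += 4)
//     C[i:i+3] = A[i:i+3] + B[i:i+3];   // one wide load/add/store each
//
// with a scalar epilogue loop covering any remaining iterations.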
18 //
// This pass has several parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
28 //
// There is a development effort going on to migrate the loop vectorizer to the
30 // VPlan infrastructure and to introduce outer loop vectorization support (see
31 // docs/Proposal/VectorizationPlan.rst and
32 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
33 // purpose, we temporarily introduced the VPlan-native vectorization path: an
34 // alternative vectorization path that is natively implemented on top of the
35 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
36 //
37 //===----------------------------------------------------------------------===//
38 //
39 // The reduction-variable vectorization is based on the paper:
40 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
41 //
42 // Variable uniformity checks are inspired by:
43 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
44 //
45 // The interleaved access vectorization is based on the paper:
46 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
47 //  Data for SIMD
48 //
49 // Other ideas/concepts are from:
50 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
51 //
52 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
53 //  Vectorizing Compilers.
54 //
55 //===----------------------------------------------------------------------===//
56 
57 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
58 #include "LoopVectorizationPlanner.h"
59 #include "VPRecipeBuilder.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "llvm/ADT/APInt.h"
62 #include "llvm/ADT/ArrayRef.h"
63 #include "llvm/ADT/DenseMap.h"
64 #include "llvm/ADT/DenseMapInfo.h"
65 #include "llvm/ADT/Hashing.h"
66 #include "llvm/ADT/MapVector.h"
67 #include "llvm/ADT/None.h"
68 #include "llvm/ADT/Optional.h"
69 #include "llvm/ADT/STLExtras.h"
70 #include "llvm/ADT/SetVector.h"
71 #include "llvm/ADT/SmallPtrSet.h"
72 #include "llvm/ADT/SmallVector.h"
73 #include "llvm/ADT/Statistic.h"
74 #include "llvm/ADT/StringRef.h"
75 #include "llvm/ADT/Twine.h"
76 #include "llvm/ADT/iterator_range.h"
77 #include "llvm/Analysis/AssumptionCache.h"
78 #include "llvm/Analysis/BasicAliasAnalysis.h"
79 #include "llvm/Analysis/BlockFrequencyInfo.h"
80 #include "llvm/Analysis/CFG.h"
81 #include "llvm/Analysis/CodeMetrics.h"
82 #include "llvm/Analysis/DemandedBits.h"
83 #include "llvm/Analysis/GlobalsModRef.h"
84 #include "llvm/Analysis/LoopAccessAnalysis.h"
85 #include "llvm/Analysis/LoopAnalysisManager.h"
86 #include "llvm/Analysis/LoopInfo.h"
87 #include "llvm/Analysis/LoopIterator.h"
88 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
89 #include "llvm/Analysis/ScalarEvolution.h"
90 #include "llvm/Analysis/ScalarEvolutionExpander.h"
91 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
92 #include "llvm/Analysis/TargetLibraryInfo.h"
93 #include "llvm/Analysis/TargetTransformInfo.h"
94 #include "llvm/Analysis/VectorUtils.h"
95 #include "llvm/IR/Attributes.h"
96 #include "llvm/IR/BasicBlock.h"
97 #include "llvm/IR/CFG.h"
98 #include "llvm/IR/Constant.h"
99 #include "llvm/IR/Constants.h"
100 #include "llvm/IR/DataLayout.h"
101 #include "llvm/IR/DebugInfoMetadata.h"
102 #include "llvm/IR/DebugLoc.h"
103 #include "llvm/IR/DerivedTypes.h"
104 #include "llvm/IR/DiagnosticInfo.h"
105 #include "llvm/IR/Dominators.h"
106 #include "llvm/IR/Function.h"
107 #include "llvm/IR/IRBuilder.h"
108 #include "llvm/IR/InstrTypes.h"
109 #include "llvm/IR/Instruction.h"
110 #include "llvm/IR/Instructions.h"
111 #include "llvm/IR/IntrinsicInst.h"
112 #include "llvm/IR/Intrinsics.h"
113 #include "llvm/IR/LLVMContext.h"
114 #include "llvm/IR/Metadata.h"
115 #include "llvm/IR/Module.h"
116 #include "llvm/IR/Operator.h"
117 #include "llvm/IR/Type.h"
118 #include "llvm/IR/Use.h"
119 #include "llvm/IR/User.h"
120 #include "llvm/IR/Value.h"
121 #include "llvm/IR/ValueHandle.h"
122 #include "llvm/IR/Verifier.h"
123 #include "llvm/Pass.h"
124 #include "llvm/Support/Casting.h"
125 #include "llvm/Support/CommandLine.h"
126 #include "llvm/Support/Compiler.h"
127 #include "llvm/Support/Debug.h"
128 #include "llvm/Support/ErrorHandling.h"
129 #include "llvm/Support/MathExtras.h"
130 #include "llvm/Support/raw_ostream.h"
131 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
132 #include "llvm/Transforms/Utils/LoopSimplify.h"
133 #include "llvm/Transforms/Utils/LoopUtils.h"
134 #include "llvm/Transforms/Utils/LoopVersioning.h"
135 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
136 #include <algorithm>
137 #include <cassert>
138 #include <cstdint>
139 #include <cstdlib>
140 #include <functional>
141 #include <iterator>
142 #include <limits>
143 #include <memory>
144 #include <string>
145 #include <tuple>
146 #include <utility>
147 #include <vector>
148 
149 using namespace llvm;
150 
151 #define LV_NAME "loop-vectorize"
152 #define DEBUG_TYPE LV_NAME
153 
154 STATISTIC(LoopsVectorized, "Number of loops vectorized");
155 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
156 
157 /// Loops with a known constant trip count below this number are vectorized only
158 /// if no scalar iteration overheads are incurred.
159 static cl::opt<unsigned> TinyTripCountVectorThreshold(
160     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
161     cl::desc("Loops with a constant trip count that is smaller than this "
162              "value are vectorized only if no scalar iteration overheads "
163              "are incurred."));
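
// For example, this threshold can be lowered when experimenting from the
// command line (an illustrative 'opt' invocation; the exact pass-pipeline
// flags depend on the LLVM version in use):
//   opt -loop-vectorize -vectorizer-min-trip-count=4 -S input.ll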
164 
165 static cl::opt<bool> MaximizeBandwidth(
166     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
167     cl::desc("Maximize bandwidth when selecting vectorization factor which "
168              "will be determined by the smallest type in loop."));
169 
170 static cl::opt<bool> EnableInterleavedMemAccesses(
171     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
172     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
173 
174 /// Maximum factor for an interleaved memory access.
175 static cl::opt<unsigned> MaxInterleaveGroupFactor(
176     "max-interleave-group-factor", cl::Hidden,
177     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
178     cl::init(8));
179 
180 /// We don't interleave loops with a known constant trip count below this
181 /// number.
182 static const unsigned TinyTripCountInterleaveThreshold = 128;
183 
184 static cl::opt<unsigned> ForceTargetNumScalarRegs(
185     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
186     cl::desc("A flag that overrides the target's number of scalar registers."));
187 
188 static cl::opt<unsigned> ForceTargetNumVectorRegs(
189     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
190     cl::desc("A flag that overrides the target's number of vector registers."));
191 
192 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
193     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
194     cl::desc("A flag that overrides the target's max interleave factor for "
195              "scalar loops."));
196 
197 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
198     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
199     cl::desc("A flag that overrides the target's max interleave factor for "
200              "vectorized loops."));
201 
202 static cl::opt<unsigned> ForceTargetInstructionCost(
203     "force-target-instruction-cost", cl::init(0), cl::Hidden,
204     cl::desc("A flag that overrides the target's expected cost for "
205              "an instruction to a single constant value. Mostly "
206              "useful for getting consistent testing."));
207 
208 static cl::opt<unsigned> SmallLoopCost(
209     "small-loop-cost", cl::init(20), cl::Hidden,
210     cl::desc(
211         "The cost of a loop that is considered 'small' by the interleaver."));
212 
213 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
214     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
215     cl::desc("Enable the use of the block frequency analysis to access PGO "
216              "heuristics minimizing code growth in cold regions and being more "
217              "aggressive in hot regions."));
218 
219 // Runtime interleave loops for load/store throughput.
220 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
221     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
222     cl::desc(
223         "Enable runtime interleaving until load/store ports are saturated"));
224 
225 /// The number of stores in a loop that are allowed to need predication.
226 static cl::opt<unsigned> NumberOfStoresToPredicate(
227     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
228     cl::desc("Max number of stores to be predicated behind an if."));
229 
230 static cl::opt<bool> EnableIndVarRegisterHeur(
231     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
232     cl::desc("Count the induction variable only once when interleaving"));
233 
234 static cl::opt<bool> EnableCondStoresVectorization(
235     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
236     cl::desc("Enable if predication of stores during vectorization."));
237 
238 static cl::opt<unsigned> MaxNestedScalarReductionIC(
239     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
240     cl::desc("The maximum interleave count to use when interleaving a scalar "
241              "reduction in a nested loop."));
242 
243 static cl::opt<bool> EnableVPlanNativePath(
244     "enable-vplan-native-path", cl::init(false), cl::Hidden,
245     cl::desc("Enable VPlan-native vectorization path with "
246              "support for outer loop vectorization."));
247 
248 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
250 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
251 // verification of the H-CFGs built.
252 static cl::opt<bool> VPlanBuildStressTest(
253     "vplan-build-stress-test", cl::init(false), cl::Hidden,
254     cl::desc(
255         "Build VPlan for every supported loop nest in the function and bail "
256         "out right after the build (stress test the VPlan H-CFG construction "
257         "in the VPlan-native vectorization path)."));
258 
259 /// A helper function for converting Scalar types to vector types.
260 /// If the incoming type is void, we return void. If the VF is 1, we return
261 /// the scalar type.
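/// For example (illustrative only): ToVectorTy(i32, 4) yields <4 x i32>, while
/// ToVectorTy(i32, 1) and ToVectorTy(void, 8) return the input type unchanged.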
262 static Type *ToVectorTy(Type *Scalar, unsigned VF) {
263   if (Scalar->isVoidTy() || VF == 1)
264     return Scalar;
265   return VectorType::get(Scalar, VF);
266 }
267 
268 // FIXME: The following helper functions have multiple implementations
269 // in the project. They can be effectively organized in a common Load/Store
270 // utilities unit.
271 
/// A helper function that returns the type of a loaded or stored value.
273 static Type *getMemInstValueType(Value *I) {
274   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
275          "Expected Load or Store instruction");
276   if (auto *LI = dyn_cast<LoadInst>(I))
277     return LI->getType();
278   return cast<StoreInst>(I)->getValueOperand()->getType();
279 }
280 
/// A helper function that returns the alignment of a load or store instruction.
282 static unsigned getMemInstAlignment(Value *I) {
283   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
284          "Expected Load or Store instruction");
285   if (auto *LI = dyn_cast<LoadInst>(I))
286     return LI->getAlignment();
287   return cast<StoreInst>(I)->getAlignment();
288 }
289 
/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
292 static unsigned getMemInstAddressSpace(Value *I) {
293   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
294          "Expected Load or Store instruction");
295   if (auto *LI = dyn_cast<LoadInst>(I))
296     return LI->getPointerAddressSpace();
297   return cast<StoreInst>(I)->getPointerAddressSpace();
298 }
299 
300 /// A helper function that returns true if the given type is irregular. The
301 /// type is irregular if its allocated size doesn't equal the store size of an
302 /// element of the corresponding vector type at the given vectorization factor.
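/// For example (illustrative, assuming a typical x86 data layout): i32 has a
/// 4-byte alloc size and <4 x i32> has a 16-byte store size, so i32 is regular
/// at VF = 4; x86_fp80 typically occupies 16 bytes per element while storing
/// only 10 bytes of data, so it is irregular at any VF.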
303 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
304   // Determine if an array of VF elements of type Ty is "bitcast compatible"
305   // with a <VF x Ty> vector.
306   if (VF > 1) {
307     auto *VectorTy = VectorType::get(Ty, VF);
308     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
309   }
310 
311   // If the vectorization factor is one, we just check if an array of type Ty
312   // requires padding between elements.
313   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
314 }
315 
316 /// A helper function that returns the reciprocal of the block probability of
317 /// predicated blocks. If we return X, we are assuming the predicated block
318 /// will execute once for every X iterations of the loop header.
319 ///
320 /// TODO: We should use actual block probability here, if available. Currently,
321 ///       we always assume predicated blocks have a 50% chance of executing.
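///
/// For example (a sketch of how callers use the result): if the instructions
/// of a predicated block have a combined cost of C, the cost model divides by
/// this value and accounts only for C / 2, reflecting the assumed 50% chance
/// of execution.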
322 static unsigned getReciprocalPredBlockProb() { return 2; }
323 
324 /// A helper function that adds a 'fast' flag to floating-point operations.
325 static Value *addFastMathFlag(Value *V) {
326   if (isa<FPMathOperator>(V)) {
327     FastMathFlags Flags;
328     Flags.setFast();
329     cast<Instruction>(V)->setFastMathFlags(Flags);
330   }
331   return V;
332 }
333 
334 /// A helper function that returns an integer or floating-point constant with
335 /// value C.
336 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
337   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
338                            : ConstantFP::get(Ty, C);
339 }
340 
341 namespace llvm {
342 
343 /// InnerLoopVectorizer vectorizes loops which contain only one basic
344 /// block to a specified vectorization factor (VF).
345 /// This class performs the widening of scalars into vectors, or multiple
346 /// scalars. This class also implements the following features:
347 /// * It inserts an epilogue loop for handling loops that don't have iteration
348 ///   counts that are known to be a multiple of the vectorization factor.
349 /// * It handles the code generation for reduction variables.
350 /// * Scalarization (implementation using scalars) of un-vectorizable
351 ///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the LoopVectorizationLegality
/// class to provide information about the induction and reduction variables
/// that were found in the loop.
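///
/// As an illustrative sketch (not a guaranteed code shape): with a trip count
/// of 1003, VF = 4 and UF = 2, the vector loop covers the first 1000
/// iterations (125 wide iterations of 8 elements each) and the scalar epilogue
/// loop executes the remaining 3 iterations.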
357 class InnerLoopVectorizer {
358 public:
359   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
360                       LoopInfo *LI, DominatorTree *DT,
361                       const TargetLibraryInfo *TLI,
362                       const TargetTransformInfo *TTI, AssumptionCache *AC,
363                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
364                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
365                       LoopVectorizationCostModel *CM)
366       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
367         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
368         Builder(PSE.getSE()->getContext()),
369         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
370   virtual ~InnerLoopVectorizer() = default;
371 
372   /// Create a new empty loop. Unlink the old loop and connect the new one.
373   /// Return the pre-header block of the new loop.
374   BasicBlock *createVectorizedLoopSkeleton();
375 
376   /// Widen a single instruction within the innermost loop.
377   void widenInstruction(Instruction &I);
378 
  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
380   void fixVectorizedLoop();
381 
382   // Return true if any runtime check is added.
383   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
384 
385   /// A type for vectorized values in the new loop. Each value from the
386   /// original loop, when vectorized, is represented by UF vector values in the
387   /// new unrolled loop, where UF is the unroll factor.
388   using VectorParts = SmallVector<Value *, 2>;
389 
390   /// Vectorize a single PHINode in a block. This method handles the induction
391   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
392   /// arbitrary length vectors.
393   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
394 
  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a scalar instance of \p Instr for the unroll part and vector
  /// lane given by \p Instance.
399   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
400                             bool IfPredicateInstr);
401 
402   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
403   /// is provided, the integer induction variable will first be truncated to
404   /// the corresponding type.
405   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
406 
407   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
408   /// vector or scalar value on-demand if one is not yet available. When
409   /// vectorizing a loop, we visit the definition of an instruction before its
410   /// uses. When visiting the definition, we either vectorize or scalarize the
411   /// instruction, creating an entry for it in the corresponding map. (In some
412   /// cases, such as induction variables, we will create both vector and scalar
413   /// entries.) Then, as we encounter uses of the definition, we derive values
414   /// for each scalar or vector use unless such a value is already available.
415   /// For example, if we scalarize a definition and one of its uses is vector,
416   /// we build the required vector on-demand with an insertelement sequence
417   /// when visiting the use. Otherwise, if the use is scalar, we can use the
418   /// existing scalar definition.
419   ///
420   /// Return a value in the new loop corresponding to \p V from the original
421   /// loop at unroll index \p Part. If the value has already been vectorized,
422   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
423   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
424   /// a new vector value on-demand by inserting the scalar values into a vector
425   /// with an insertelement sequence. If the value has been neither vectorized
426   /// nor scalarized, it must be loop invariant, so we simply broadcast the
427   /// value into a vector.
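  ///
  /// As an illustrative sketch (types and names are hypothetical): if %a was
  /// scalarized and a later user needs it as a vector at VF = 4, the lanes are
  /// packed on demand with a sequence of the form
  ///   %v0 = insertelement <4 x i32> undef, i32 %a.0, i32 0
  ///   %v1 = insertelement <4 x i32> %v0, i32 %a.1, i32 1
  /// and so on for the remaining lanes.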
428   Value *getOrCreateVectorValue(Value *V, unsigned Part);
429 
430   /// Return a value in the new loop corresponding to \p V from the original
431   /// loop at unroll and vector indices \p Instance. If the value has been
432   /// vectorized but not scalarized, the necessary extractelement instruction
433   /// will be generated.
434   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
435 
436   /// Construct the vector value of a scalarized value \p V one lane at a time.
437   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
438 
439   /// Try to vectorize the interleaved access group that \p Instr belongs to.
440   void vectorizeInterleaveGroup(Instruction *Instr);
441 
442   /// Vectorize Load and Store instructions, optionally masking the vector
443   /// operations if \p BlockInMask is non-null.
444   void vectorizeMemoryInstruction(Instruction *Instr,
445                                   VectorParts *BlockInMask = nullptr);
446 
447   /// Set the debug location in the builder using the debug location in
448   /// the instruction.
449   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
450 
451 protected:
452   friend class LoopVectorizationPlanner;
453 
454   /// A small list of PHINodes.
455   using PhiVector = SmallVector<PHINode *, 4>;
456 
457   /// A type for scalarized values in the new loop. Each value from the
458   /// original loop, when scalarized, is represented by UF x VF scalar values
459   /// in the new unrolled loop, where UF is the unroll factor and VF is the
460   /// vectorization factor.
461   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
462 
463   /// Set up the values of the IVs correctly when exiting the vector loop.
464   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
465                     Value *CountRoundDown, Value *EndValue,
466                     BasicBlock *MiddleBlock);
467 
468   /// Create a new induction variable inside L.
469   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
470                                    Value *Step, Instruction *DL);
471 
472   /// Handle all cross-iteration phis in the header.
473   void fixCrossIterationPHIs();
474 
475   /// Fix a first-order recurrence. This is the second phase of vectorizing
476   /// this phi node.
477   void fixFirstOrderRecurrence(PHINode *Phi);
478 
479   /// Fix a reduction cross-iteration phi. This is the second phase of
480   /// vectorizing this phi node.
481   void fixReduction(PHINode *Phi);
482 
  /// The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
487   void fixLCSSAPHIs();
488 
489   /// Iteratively sink the scalarized operands of a predicated instruction into
490   /// the block that was created for it.
491   void sinkScalarOperands(Instruction *PredInst);
492 
493   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
494   /// represented as.
495   void truncateToMinimalBitwidths();
496 
497   /// Insert the new loop to the loop hierarchy and pass manager
498   /// and update the analysis passes.
499   void updateAnalysis();
500 
501   /// Create a broadcast instruction. This method generates a broadcast
502   /// instruction (shuffle) for loop invariant values and for the induction
503   /// value. If this is the induction variable then we extend it to N, N+1, ...
504   /// this is needed because each iteration in the loop corresponds to a SIMD
505   /// element.
506   virtual Value *getBroadcastInstrs(Value *V);
507 
  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
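  ///
  /// For example (illustrative only): with StartIdx = 0, Step = 1 and a
  /// 4-element integer Val broadcast from %x, the result is
  /// <%x + 0, %x + 1, %x + 2, %x + 3>.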
511   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
512                                Instruction::BinaryOps Opcode =
513                                Instruction::BinaryOpsEnd);
514 
515   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
516   /// variable on which to base the steps, \p Step is the size of the step, and
517   /// \p EntryVal is the value from the original loop that maps to the steps.
518   /// Note that \p EntryVal doesn't have to be an induction variable - it
519   /// can also be a truncate instruction.
520   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
521                         const InductionDescriptor &ID);
522 
523   /// Create a vector induction phi node based on an existing scalar one. \p
524   /// EntryVal is the value from the original loop that maps to the vector phi
525   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
526   /// truncate instruction, instead of widening the original IV, we widen a
527   /// version of the IV truncated to \p EntryVal's type.
528   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
529                                        Value *Step, Instruction *EntryVal);
530 
531   /// Returns true if an instruction \p I should be scalarized instead of
532   /// vectorized for the chosen vectorization factor.
533   bool shouldScalarizeInstruction(Instruction *I) const;
534 
535   /// Returns true if we should generate a scalar version of \p IV.
536   bool needsScalarInduction(Instruction *IV) const;
537 
538   /// If there is a cast involved in the induction variable \p ID, which should
539   /// be ignored in the vectorized loop body, this function records the
540   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
  /// otherwise, \p VectorLoopValue is a widened/vectorized value.
547   ///
548   /// \p EntryVal is the value from the original loop that maps to the vector
549   /// phi node and is used to distinguish what is the IV currently being
550   /// processed - original one (if \p EntryVal is a phi corresponding to the
551   /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
553   /// latter case \p EntryVal is a TruncInst and we must not record anything for
554   /// that IV, but it's error-prone to expect callers of this routine to care
555   /// about that, hence this explicit parameter.
556   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
557                                              const Instruction *EntryVal,
558                                              Value *VectorLoopValue,
559                                              unsigned Part,
560                                              unsigned Lane = UINT_MAX);
561 
562   /// Generate a shuffle sequence that will reverse the vector Vec.
563   virtual Value *reverseVector(Value *Vec);
564 
565   /// Returns (and creates if needed) the original loop trip count.
566   Value *getOrCreateTripCount(Loop *NewLoop);
567 
568   /// Returns (and creates if needed) the trip count of the widened loop.
569   Value *getOrCreateVectorTripCount(Loop *NewLoop);
570 
571   /// Returns a bitcasted value to the requested vector type.
572   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
573   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
574                                 const DataLayout &DL);
575 
576   /// Emit a bypass check to see if the vector trip count is zero, including if
577   /// it overflows.
578   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
579 
580   /// Emit a bypass check to see if all of the SCEV assumptions we've
581   /// had to make are correct.
582   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
583 
584   /// Emit bypass checks to check any memory assumptions we may have made.
585   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
586 
587   /// Add additional metadata to \p To that was not present on \p Orig.
588   ///
589   /// Currently this is used to add the noalias annotations based on the
590   /// inserted memchecks.  Use this for instructions that are *cloned* into the
591   /// vector loop.
592   void addNewMetadata(Instruction *To, const Instruction *Orig);
593 
594   /// Add metadata from one instruction to another.
595   ///
596   /// This includes both the original MDs from \p From and additional ones (\see
597   /// addNewMetadata).  Use this for *newly created* instructions in the vector
598   /// loop.
599   void addMetadata(Instruction *To, Instruction *From);
600 
601   /// Similar to the previous function but it adds the metadata to a
602   /// vector of instructions.
603   void addMetadata(ArrayRef<Value *> To, Instruction *From);
604 
605   /// The original loop.
606   Loop *OrigLoop;
607 
608   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
609   /// dynamic knowledge to simplify SCEV expressions and converts them to a
610   /// more usable form.
611   PredicatedScalarEvolution &PSE;
612 
613   /// Loop Info.
614   LoopInfo *LI;
615 
616   /// Dominator Tree.
617   DominatorTree *DT;
618 
619   /// Alias Analysis.
620   AliasAnalysis *AA;
621 
622   /// Target Library Info.
623   const TargetLibraryInfo *TLI;
624 
625   /// Target Transform Info.
626   const TargetTransformInfo *TTI;
627 
628   /// Assumption Cache.
629   AssumptionCache *AC;
630 
631   /// Interface to emit optimization remarks.
632   OptimizationRemarkEmitter *ORE;
633 
634   /// LoopVersioning.  It's only set up (non-null) if memchecks were
635   /// used.
636   ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
639   std::unique_ptr<LoopVersioning> LVer;
640 
641   /// The vectorization SIMD factor to use. Each vector will have this many
642   /// vector elements.
643   unsigned VF;
644 
645   /// The vectorization unroll factor to use. Each scalar is vectorized to this
646   /// many different vector instructions.
647   unsigned UF;
648 
  /// The builder that we use.
650   IRBuilder<> Builder;
651 
652   // --- Vectorization state ---
653 
654   /// The vector-loop preheader.
655   BasicBlock *LoopVectorPreHeader;
656 
657   /// The scalar-loop preheader.
658   BasicBlock *LoopScalarPreHeader;
659 
  /// Middle block between the vector and the scalar loop.
661   BasicBlock *LoopMiddleBlock;
662 
663   /// The ExitBlock of the scalar loop.
664   BasicBlock *LoopExitBlock;
665 
666   /// The vector loop body.
667   BasicBlock *LoopVectorBody;
668 
669   /// The scalar loop body.
670   BasicBlock *LoopScalarBody;
671 
672   /// A list of all bypass blocks. The first block is the entry of the loop.
673   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
674 
675   /// The new Induction variable which was added to the new block.
676   PHINode *Induction = nullptr;
677 
678   /// The induction variable of the old basic block.
679   PHINode *OldInduction = nullptr;
680 
681   /// Maps values from the original loop to their corresponding values in the
682   /// vectorized loop. A key value can map to either vector values, scalar
683   /// values or both kinds of values, depending on whether the key was
684   /// vectorized and scalarized.
685   VectorizerValueMap VectorLoopValueMap;
686 
687   /// Store instructions that were predicated.
688   SmallVector<Instruction *, 4> PredicatedInstructions;
689 
690   /// Trip count of the original loop.
691   Value *TripCount = nullptr;
692 
693   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
694   Value *VectorTripCount = nullptr;
695 
696   /// The legality analysis.
697   LoopVectorizationLegality *Legal;
698 
  /// The profitability analysis.
700   LoopVectorizationCostModel *Cost;
701 
702   // Record whether runtime checks are added.
703   bool AddedSafetyChecks = false;
704 
705   // Holds the end values for each induction variable. We save the end values
  // so we can later fix up the external users of the induction variables.
707   DenseMap<PHINode *, Value *> IVEndValues;
708 };
709 
710 class InnerLoopUnroller : public InnerLoopVectorizer {
711 public:
712   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
713                     LoopInfo *LI, DominatorTree *DT,
714                     const TargetLibraryInfo *TLI,
715                     const TargetTransformInfo *TTI, AssumptionCache *AC,
716                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
717                     LoopVectorizationLegality *LVL,
718                     LoopVectorizationCostModel *CM)
719       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
720                             UnrollFactor, LVL, CM) {}
721 
722 private:
723   Value *getBroadcastInstrs(Value *V) override;
724   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
725                        Instruction::BinaryOps Opcode =
726                        Instruction::BinaryOpsEnd) override;
727   Value *reverseVector(Value *Vec) override;
728 };
729 
730 } // end namespace llvm
731 
/// Look for a meaningful debug location on the instruction or its
/// operands.
734 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
735   if (!I)
736     return I;
737 
738   DebugLoc Empty;
739   if (I->getDebugLoc() != Empty)
740     return I;
741 
742   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
743     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
744       if (OpInst->getDebugLoc() != Empty)
745         return OpInst;
746   }
747 
748   return I;
749 }
750 
751 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
752   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
753     const DILocation *DIL = Inst->getDebugLoc();
754     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
755         !isa<DbgInfoIntrinsic>(Inst))
756       B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
757     else
758       B.SetCurrentDebugLocation(DIL);
759   } else
760     B.SetCurrentDebugLocation(DebugLoc());
761 }
762 
763 #ifndef NDEBUG
764 /// \return string containing a file name and a line # for the given loop.
765 static std::string getDebugLocString(const Loop *L) {
766   std::string Result;
767   if (L) {
768     raw_string_ostream OS(Result);
769     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
770       LoopDbgLoc.print(OS);
771     else
772       // Just print the module name.
773       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
774     OS.flush();
775   }
776   return Result;
777 }
778 #endif
779 
780 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
781                                          const Instruction *Orig) {
782   // If the loop was versioned with memchecks, add the corresponding no-alias
783   // metadata.
784   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
785     LVer->annotateInstWithNoAlias(To, Orig);
786 }
787 
788 void InnerLoopVectorizer::addMetadata(Instruction *To,
789                                       Instruction *From) {
790   propagateMetadata(To, From);
791   addNewMetadata(To, From);
792 }
793 
794 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
795                                       Instruction *From) {
796   for (Value *V : To) {
797     if (Instruction *I = dyn_cast<Instruction>(V))
798       addMetadata(I, From);
799   }
800 }
801 
802 namespace llvm {
803 
/// A group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
810 ///
811 /// E.g. An interleaved load group of factor 4:
812 ///        for (unsigned i = 0; i < 1024; i+=4) {
813 ///          a = A[i];                           // Member of index 0
814 ///          b = A[i+1];                         // Member of index 1
815 ///          d = A[i+3];                         // Member of index 3
816 ///          ...
817 ///        }
818 ///
819 ///      An interleaved store group of factor 4:
820 ///        for (unsigned i = 0; i < 1024; i+=4) {
821 ///          ...
822 ///          A[i]   = a;                         // Member of index 0
823 ///          A[i+1] = b;                         // Member of index 1
824 ///          A[i+2] = c;                         // Member of index 2
825 ///          A[i+3] = d;                         // Member of index 3
826 ///        }
827 ///
828 /// Note: the interleaved load group could have gaps (missing members), but
829 /// the interleaved store group doesn't allow gaps.
830 class InterleaveGroup {
831 public:
832   InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
833       : Align(Align), InsertPos(Instr) {
834     assert(Align && "The alignment should be non-zero");
835 
836     Factor = std::abs(Stride);
837     assert(Factor > 1 && "Invalid interleave factor");
838 
839     Reverse = Stride < 0;
840     Members[0] = Instr;
841   }
842 
843   bool isReverse() const { return Reverse; }
844   unsigned getFactor() const { return Factor; }
845   unsigned getAlignment() const { return Align; }
846   unsigned getNumMembers() const { return Members.size(); }
847 
  /// Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and may be
  /// negative if the new member becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
853   bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
854     assert(NewAlign && "The new member's alignment should be non-zero");
855 
856     int Key = Index + SmallestKey;
857 
858     // Skip if there is already a member with the same index.
859     if (Members.find(Key) != Members.end())
860       return false;
861 
862     if (Key > LargestKey) {
863       // The largest index is always less than the interleave factor.
864       if (Index >= static_cast<int>(Factor))
865         return false;
866 
867       LargestKey = Key;
868     } else if (Key < SmallestKey) {
      // The index span of the group must stay below the interleave factor.
870       if (LargestKey - Key >= static_cast<int>(Factor))
871         return false;
872 
873       SmallestKey = Key;
874     }
875 
876     // It's always safe to select the minimum alignment.
877     Align = std::min(Align, NewAlign);
878     Members[Key] = Instr;
879     return true;
880   }
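
  // An illustrative walk-through (hypothetical accesses): if the group is
  // created from A[i+1] (the leader, key 0) and A[i] is then inserted with
  // Index -1, SmallestKey becomes -1; inserting A[i+3] with Index 2 makes
  // LargestKey 2. Afterwards getMember(0) returns the A[i] access and
  // getIndex on the A[i+3] access returns 3.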
881 
882   /// Get the member with the given index \p Index
883   ///
  /// \returns nullptr if the group contains no such member.
885   Instruction *getMember(unsigned Index) const {
886     int Key = SmallestKey + Index;
887     auto Member = Members.find(Key);
888     if (Member == Members.end())
889       return nullptr;
890 
891     return Member->second;
892   }
893 
894   /// Get the index for the given member. Unlike the key in the member
895   /// map, the index starts from 0.
896   unsigned getIndex(Instruction *Instr) const {
897     for (auto I : Members)
898       if (I.second == Instr)
899         return I.first - SmallestKey;
900 
901     llvm_unreachable("InterleaveGroup contains no such member");
902   }
903 
904   Instruction *getInsertPos() const { return InsertPos; }
905   void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
906 
907   /// Add metadata (e.g. alias info) from the instructions in this group to \p
908   /// NewInst.
909   ///
  /// FIXME: this function currently does not add noalias metadata a la
  /// addNewMetadata.  To do that we need to compute the intersection of the
912   /// noalias info from all members.
913   void addMetadata(Instruction *NewInst) const {
914     SmallVector<Value *, 4> VL;
915     std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
916                    [](std::pair<int, Instruction *> p) { return p.second; });
917     propagateMetadata(NewInst, VL);
918   }
919 
920 private:
921   unsigned Factor; // Interleave Factor.
922   bool Reverse;
923   unsigned Align;
924   DenseMap<int, Instruction *> Members;
925   int SmallestKey = 0;
926   int LargestKey = 0;
927 
928   // To avoid breaking dependences, vectorized instructions of an interleave
929   // group should be inserted at either the first load or the last store in
930   // program order.
931   //
932   // E.g. %even = load i32             // Insert Position
933   //      %add = add i32 %even         // Use of %even
934   //      %odd = load i32
935   //
936   //      store i32 %even
937   //      %odd = add i32               // Def of %odd
938   //      store i32 %odd               // Insert Position
939   Instruction *InsertPos;
940 };
941 } // end namespace llvm
942 
943 namespace {
944 
945 /// Drive the analysis of interleaved memory accesses in the loop.
946 ///
947 /// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses is unsafe.
950 ///
951 /// The analysis collects interleave groups and records the relationships
952 /// between the member and the group in a map.
953 class InterleavedAccessInfo {
954 public:
955   InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
956                         DominatorTree *DT, LoopInfo *LI,
957                         const LoopAccessInfo *LAI)
958     : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}
959 
960   ~InterleavedAccessInfo() {
961     SmallPtrSet<InterleaveGroup *, 4> DelSet;
962     // Avoid releasing a pointer twice.
963     for (auto &I : InterleaveGroupMap)
964       DelSet.insert(I.second);
965     for (auto *Ptr : DelSet)
966       delete Ptr;
967   }
968 
  /// Analyze the interleaved accesses and collect them in interleave
  /// groups. Symbolic strides are substituted using the stride information
  /// collected during the loop access analysis.
971   void analyzeInterleaving();
972 
973   /// Check if \p Instr belongs to any interleave group.
974   bool isInterleaved(Instruction *Instr) const {
975     return InterleaveGroupMap.find(Instr) != InterleaveGroupMap.end();
976   }
977 
978   /// Get the interleave group that \p Instr belongs to.
979   ///
  /// \returns nullptr if \p Instr does not belong to any group.
981   InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
982     auto Group = InterleaveGroupMap.find(Instr);
983     if (Group == InterleaveGroupMap.end())
984       return nullptr;
985     return Group->second;
986   }
987 
988   /// Returns true if an interleaved group that may access memory
989   /// out-of-bounds requires a scalar epilogue iteration for correctness.
990   bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
991 
992 private:
993   /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
994   /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
995   /// The interleaved access analysis can also add new predicates (for example
996   /// by versioning strides of pointers).
997   PredicatedScalarEvolution &PSE;
998 
999   Loop *TheLoop;
1000   DominatorTree *DT;
1001   LoopInfo *LI;
1002   const LoopAccessInfo *LAI;
1003 
1004   /// True if the loop may contain non-reversed interleaved groups with
1005   /// out-of-bounds accesses. We ensure we don't speculatively access memory
1006   /// out-of-bounds by executing at least one scalar epilogue iteration.
1007   bool RequiresScalarEpilogue = false;
1008 
1009   /// Holds the relationships between the members and the interleave group.
1010   DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
1011 
1012   /// Holds dependences among the memory accesses in the loop. It maps a source
1013   /// access to a set of dependent sink accesses.
1014   DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
1015 
1016   /// The descriptor for a strided memory access.
1017   struct StrideDescriptor {
1018     StrideDescriptor() = default;
1019     StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
1020                      unsigned Align)
1021         : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
1022 
1023     // The access's stride. It is negative for a reverse access.
1024     int64_t Stride = 0;
1025 
1026     // The scalar expression of this access.
1027     const SCEV *Scev = nullptr;
1028 
1029     // The size of the memory object.
1030     uint64_t Size = 0;
1031 
1032     // The alignment of this access.
1033     unsigned Align = 0;
1034   };
1035 
1036   /// A type for holding instructions and their stride descriptors.
1037   using StrideEntry = std::pair<Instruction *, StrideDescriptor>;
1038 
1039   /// Create a new interleave group with the given instruction \p Instr,
1040   /// stride \p Stride and alignment \p Align.
1041   ///
1042   /// \returns the newly created interleave group.
1043   InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
1044                                          unsigned Align) {
1045     assert(!isInterleaved(Instr) && "Already in an interleaved access group");
1046     InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
1047     return InterleaveGroupMap[Instr];
1048   }
1049 
1050   /// Release the group and remove all the relationships.
1051   void releaseGroup(InterleaveGroup *Group) {
1052     for (unsigned i = 0; i < Group->getFactor(); i++)
1053       if (Instruction *Member = Group->getMember(i))
1054         InterleaveGroupMap.erase(Member);
1055 
1056     delete Group;
1057   }
1058 
1059   /// Collect all the accesses with a constant stride in program order.
1060   void collectConstStrideAccesses(
1061       MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1062       const ValueToValueMap &Strides);
1063 
1064   /// Returns true if \p Stride is allowed in an interleaved group.
1065   static bool isStrided(int Stride) {
1066     unsigned Factor = std::abs(Stride);
1067     return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1068   }
1069 
1070   /// Returns true if \p BB is a predicated block.
1071   bool isPredicated(BasicBlock *BB) const {
1072     return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
1073   }
1074 
1075   /// Returns true if LoopAccessInfo can be used for dependence queries.
1076   bool areDependencesValid() const {
1077     return LAI && LAI->getDepChecker().getDependences();
1078   }
1079 
1080   /// Returns true if memory accesses \p A and \p B can be reordered, if
1081   /// necessary, when constructing interleaved groups.
1082   ///
  /// \p A must precede \p B in program order. We return false only if
  /// reordering is prevented because \p A and \p B may be dependent.
1085   bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
1086                                                  StrideEntry *B) const {
1087     // Code motion for interleaved accesses can potentially hoist strided loads
1088     // and sink strided stores. The code below checks the legality of the
1089     // following two conditions:
1090     //
1091     // 1. Potentially moving a strided load (B) before any store (A) that
1092     //    precedes B, or
1093     //
1094     // 2. Potentially moving a strided store (A) after any load or store (B)
1095     //    that A precedes.
1096     //
1097     // It's legal to reorder A and B if we know there isn't a dependence from A
1098     // to B. Note that this determination is conservative since some
1099     // dependences could potentially be reordered safely.
1100 
1101     // A is potentially the source of a dependence.
1102     auto *Src = A->first;
1103     auto SrcDes = A->second;
1104 
1105     // B is potentially the sink of a dependence.
1106     auto *Sink = B->first;
1107     auto SinkDes = B->second;
1108 
1109     // Code motion for interleaved accesses can't violate WAR dependences.
1110     // Thus, reordering is legal if the source isn't a write.
1111     if (!Src->mayWriteToMemory())
1112       return true;
1113 
1114     // At least one of the accesses must be strided.
1115     if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
1116       return true;
1117 
1118     // If dependence information is not available from LoopAccessInfo,
1119     // conservatively assume the instructions can't be reordered.
1120     if (!areDependencesValid())
1121       return false;
1122 
1123     // If we know there is a dependence from source to sink, assume the
1124     // instructions can't be reordered. Otherwise, reordering is legal.
1125     return Dependences.find(Src) == Dependences.end() ||
1126            !Dependences.lookup(Src).count(Sink);
1127   }
1128 
1129   /// Collect the dependences from LoopAccessInfo.
1130   ///
1131   /// We process the dependences once during the interleaved access analysis to
1132   /// enable constant-time dependence queries.
1133   void collectDependences() {
1134     if (!areDependencesValid())
1135       return;
1136     auto *Deps = LAI->getDepChecker().getDependences();
1137     for (auto Dep : *Deps)
1138       Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
1139   }
1140 };
1141 
1142 } // end anonymous namespace
1143 
1144 static void emitMissedWarning(Function *F, Loop *L,
1145                               const LoopVectorizeHints &LH,
1146                               OptimizationRemarkEmitter *ORE) {
1147   LH.emitRemarkWithHints();
1148 
1149   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1150     if (LH.getWidth() != 1)
1151       ORE->emit(DiagnosticInfoOptimizationFailure(
1152                     DEBUG_TYPE, "FailedRequestedVectorization",
1153                     L->getStartLoc(), L->getHeader())
1154                 << "loop not vectorized: "
1155                 << "failed explicitly specified loop vectorization");
1156     else if (LH.getInterleave() != 1)
1157       ORE->emit(DiagnosticInfoOptimizationFailure(
1158                     DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
1159                     L->getHeader())
1160                 << "loop not interleaved: "
1161                 << "failed explicitly specified loop interleaving");
1162   }
1163 }
1164 
1165 namespace llvm {
1166 
1167 /// LoopVectorizationCostModel - estimates the expected speedups due to
1168 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1171 /// expected speedup/slowdowns due to the supported instruction set. We use the
1172 /// TargetTransformInfo to query the different backends for the cost of
1173 /// different operations.
1174 class LoopVectorizationCostModel {
1175 public:
1176   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1177                              LoopInfo *LI, LoopVectorizationLegality *Legal,
1178                              const TargetTransformInfo &TTI,
1179                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1180                              AssumptionCache *AC,
1181                              OptimizationRemarkEmitter *ORE, const Function *F,
1182                              const LoopVectorizeHints *Hints,
1183                              InterleavedAccessInfo &IAI)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}
1186 
1187   /// \return An upper bound for the vectorization factor, or None if
1188   /// vectorization should be avoided up front.
1189   Optional<unsigned> computeMaxVF(bool OptForSize);
1190 
1191   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
  /// then that vectorization factor will be selected if vectorization is
  /// possible.
1195   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1196 
1197   /// Setup cost-based decisions for user vectorization factor.
1198   void selectUserVectorizationFactor(unsigned UserVF) {
1199     collectUniformsAndScalars(UserVF);
1200     collectInstsToScalarize(UserVF);
1201   }
1202 
1203   /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64-bit loop indices.
1206   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1207 
1208   /// \return The desired interleave count.
1209   /// If interleave count has been specified by metadata it will be returned.
1210   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1211   /// are the selected vectorization factor and the cost of the selected VF.
1212   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1213                                  unsigned LoopCost);
1214 
1215   /// Memory access instruction may be vectorized in more than one way.
1216   /// Form of instruction after vectorization depends on cost.
1217   /// This function takes cost-based decisions for Load/Store instructions
1218   /// and collects them in a map. This decisions map is used for building
1219   /// the lists of loop-uniform and loop-scalar instructions.
1220   /// The calculated cost is saved with widening decision in order to
1221   /// avoid redundant calculations.
1222   void setCostBasedWideningDecision(unsigned VF);
1223 
1224   /// A struct that represents some properties of the register usage
1225   /// of a loop.
1226   struct RegisterUsage {
1227     /// Holds the number of loop invariant values that are used in the loop.
1228     unsigned LoopInvariantRegs;
1229 
1230     /// Holds the maximum number of concurrent live intervals in the loop.
1231     unsigned MaxLocalUsers;
1232   };
1233 
1234   /// \return Returns information about the register usages of the loop for the
1235   /// given vectorization factors.
1236   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1237 
1238   /// Collect values we want to ignore in the cost model.
1239   void collectValuesToIgnore();
1240 
1241   /// \returns The smallest bitwidth each instruction can be represented with.
1242   /// The vector equivalents of these instructions should be truncated to this
1243   /// type.
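  ///
  /// For example (an illustrative case): if an i32 addition in the loop only
  /// feeds an i8 store, its minimal bitwidth is 8, and its vectorized form can
  /// be truncated to operate on <VF x i8> elements.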
1244   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1245     return MinBWs;
1246   }
1247 
1248   /// \returns True if it is more profitable to scalarize instruction \p I for
1249   /// vectorization factor \p VF.
1250   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1251     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1252     auto Scalars = InstsToScalarize.find(VF);
1253     assert(Scalars != InstsToScalarize.end() &&
1254            "VF not yet analyzed for scalarization profitability");
1255     return Scalars->second.find(I) != Scalars->second.end();
1256   }
1257 
1258   /// Returns true if \p I is known to be uniform after vectorization.
1259   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1260     if (VF == 1)
1261       return true;
1262     auto UniformsPerVF = Uniforms.find(VF);
1263     assert(UniformsPerVF != Uniforms.end() &&
1264            "VF not yet analyzed for uniformity");
1265     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
1266   }
1267 
1268   /// Returns true if \p I is known to be scalar after vectorization.
1269   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1270     if (VF == 1)
1271       return true;
1272     auto ScalarsPerVF = Scalars.find(VF);
1273     assert(ScalarsPerVF != Scalars.end() &&
1274            "Scalar values are not calculated for VF");
1275     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
1276   }
1277 
1278   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1279   /// for vectorization factor \p VF.
1280   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1281     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1282            !isProfitableToScalarize(I, VF) &&
1283            !isScalarAfterVectorization(I, VF);
1284   }
1285 
1286   /// Decision that was taken during cost calculation for memory instruction.
1287   enum InstWidening {
1288     CM_Unknown,
1289     CM_Widen,         // For consecutive accesses with stride +1.
1290     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1291     CM_Interleave,
1292     CM_GatherScatter,
1293     CM_Scalarize
1294   };
1295 
1296   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1297   /// instruction \p I and vector width \p VF.
1298   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1299                            unsigned Cost) {
1300     assert(VF >= 2 && "Expected VF >=2");
1301     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1302   }
1303 
1304   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1305   /// interleaving group \p Grp and vector width \p VF.
1306   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1307                            InstWidening W, unsigned Cost) {
1308     assert(VF >= 2 && "Expected VF >=2");
1309     // Broadcast this decision to all instructions inside the group.
1310     // But the cost will be assigned to one instruction only.
1311     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1312       if (auto *I = Grp->getMember(i)) {
1313         if (Grp->getInsertPos() == I)
1314           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1315         else
1316           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1317       }
1318     }
1319   }
1320 
1321   /// Return the cost model decision for the given instruction \p I and vector
1322   /// width \p VF. Return CM_Unknown if this instruction did not pass
1323   /// through the cost modeling.
1324   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1325     assert(VF >= 2 && "Expected VF >=2");
1326     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1327     auto Itr = WideningDecisions.find(InstOnVF);
1328     if (Itr == WideningDecisions.end())
1329       return CM_Unknown;
1330     return Itr->second.first;
1331   }
1332 
1333   /// Return the vectorization cost for the given instruction \p I and vector
1334   /// width \p VF.
1335   unsigned getWideningCost(Instruction *I, unsigned VF) {
1336     assert(VF >= 2 && "Expected VF >=2");
1337     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1338     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1339            "The cost is not calculated");
1340     return WideningDecisions[InstOnVF].second;
1341   }
1342 
1343   /// Return true if instruction \p I is an optimizable truncate whose operand
1344   /// is an induction variable. Such a truncate will be removed by adding a new
1345   /// induction variable with the destination type.
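  /// For example (illustrative), a truncate such as "%t = trunc i64 %iv to
  /// i32" of an induction variable %iv can be removed by creating a new i32
  /// induction variable that produces the truncated values directly.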
1346   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1347     // If the instruction is not a truncate, return false.
1348     auto *Trunc = dyn_cast<TruncInst>(I);
1349     if (!Trunc)
1350       return false;
1351 
1352     // Get the source and destination types of the truncate.
1353     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1354     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1355 
1356     // If the truncate is free for the given types, return false. Replacing a
1357     // free truncate with an induction variable would add an induction variable
1358     // update instruction to each iteration of the loop. We exclude from this
1359     // check the primary induction variable since it will need an update
1360     // instruction regardless.
1361     Value *Op = Trunc->getOperand(0);
1362     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1363       return false;
1364 
1365     // If the truncated value is not an induction variable, return false.
1366     return Legal->isInductionPhi(Op);
1367   }
1368 
1369   /// Collects the instructions to scalarize for each predicated instruction in
1370   /// the loop.
1371   void collectInstsToScalarize(unsigned VF);
1372 
1373   /// Collect Uniform and Scalar values for the given \p VF.
1374   /// The sets depend on CM decision for Load/Store instructions
1375   /// that may be vectorized as interleave, gather-scatter or scalarized.
1376   void collectUniformsAndScalars(unsigned VF) {
1377     // Do the analysis once.
1378     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1379       return;
1380     setCostBasedWideningDecision(VF);
1381     collectLoopUniforms(VF);
1382     collectLoopScalars(VF);
1383   }
1384 
1385   /// Returns true if the target machine supports masked store operation
1386   /// for the given \p DataType and kind of access to \p Ptr.
1387   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1388     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
1389   }
1390 
1391   /// Returns true if the target machine supports masked load operation
1392   /// for the given \p DataType and kind of access to \p Ptr.
1393   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1394     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
1395   }
1396 
1397   /// Returns true if the target machine supports masked scatter operation
1398   /// for the given \p DataType.
1399   bool isLegalMaskedScatter(Type *DataType) {
1400     return TTI.isLegalMaskedScatter(DataType);
1401   }
1402 
1403   /// Returns true if the target machine supports masked gather operation
1404   /// for the given \p DataType.
1405   bool isLegalMaskedGather(Type *DataType) {
1406     return TTI.isLegalMaskedGather(DataType);
1407   }
1408 
1409   /// Returns true if the target machine can represent \p V as a masked gather
1410   /// or scatter operation.
1411   bool isLegalGatherOrScatter(Value *V) {
1412     bool LI = isa<LoadInst>(V);
1413     bool SI = isa<StoreInst>(V);
1414     if (!LI && !SI)
1415       return false;
1416     auto *Ty = getMemInstValueType(V);
1417     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1418   }
1419 
1420   /// Returns true if \p I is an instruction that will be scalarized with
1421   /// predication. Such instructions include conditional stores and
1422   /// instructions that may divide by zero.
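  /// For example, a udiv that executes only under a condition in the original
  /// loop must be scalarized and predicated so that a lane with a zero
  /// divisor is never executed speculatively (illustrative).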
1423   bool isScalarWithPredication(Instruction *I);
1424 
1425   /// Returns true if \p I is a memory instruction with consecutive memory
1426   /// access that can be widened.
1427   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1428 
1429   /// Check if \p Instr belongs to any interleaved access group.
1430   bool isAccessInterleaved(Instruction *Instr) {
1431     return InterleaveInfo.isInterleaved(Instr);
1432   }
1433 
1434   /// Get the interleaved access group that \p Instr belongs to.
1435   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1436     return InterleaveInfo.getInterleaveGroup(Instr);
1437   }
1438 
1439   /// Returns true if an interleaved group requires a scalar iteration
1440   /// to handle accesses with gaps.
1441   bool requiresScalarEpilogue() const {
1442     return InterleaveInfo.requiresScalarEpilogue();
1443   }
1444 
1445 private:
1446   unsigned NumPredStores = 0;
1447 
1448   /// \return An upper bound for the vectorization factor, larger than zero.
1449   /// One is returned if vectorization should best be avoided due to cost.
1450   unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);
1451 
1452   /// The vectorization cost is a combination of the cost itself and a
1453   /// boolean indicating whether any of the contributing operations will
1454   /// actually operate on vector values after type legalization in the
1455   /// backend. If this latter value is false, then all operations will be
1456   /// scalarized (i.e. no vectorization has actually taken place).
1459   using VectorizationCostTy = std::pair<unsigned, bool>;
1460 
1461   /// Returns the expected execution cost. The unit of the cost does
1462   /// not matter because we use the 'cost' units to compare different
1463   /// vector widths. The cost that is returned is *not* normalized by
1464   /// the vectorization factor.
1465   VectorizationCostTy expectedCost(unsigned VF);
1466 
1467   /// Returns the execution time cost of an instruction for a given vector
1468   /// width. Vector width of one means scalar.
1469   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1470 
1471   /// The cost-computation logic from getInstructionCost which provides
1472   /// the vector type as an output parameter.
1473   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1474 
1475   /// Calculate vectorization cost of memory instruction \p I.
1476   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1477 
1478   /// The cost computation for scalarized memory instruction.
1479   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1480 
1481   /// The cost computation for interleaving group of memory instructions.
1482   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1483 
1484   /// The cost computation for Gather/Scatter instruction.
1485   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1486 
1487   /// The cost computation for widening instruction \p I with consecutive
1488   /// memory access.
1489   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1490 
1491   /// The cost calculation for Load instruction \p I with uniform pointer -
1492   /// scalar load + broadcast.
1493   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1494 
1495   /// Returns whether the instruction is a load or store and will be emitted
1496   /// as a vector operation.
1497   bool isConsecutiveLoadOrStore(Instruction *I);
1498 
1499   /// Returns true if an artificially high cost for emulated masked memrefs
1500   /// should be used.
1501   bool useEmulatedMaskMemRefHack(Instruction *I);
1502 
1503   /// Create an analysis remark that explains why vectorization failed
1504   ///
1505   /// \p RemarkName is the identifier for the remark.  \return the remark object
1506   /// that can be streamed to.
1507   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
1508     return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1509                                   RemarkName, TheLoop);
1510   }
1511 
1512   /// Map of scalar integer values to the smallest bitwidth they can be legally
1513   /// represented as. The vector equivalents of these values should be truncated
1514   /// to this type.
1515   MapVector<Instruction *, uint64_t> MinBWs;
1516 
1517   /// A type representing the costs for instructions if they were to be
1518   /// scalarized rather than vectorized. The entries are Instruction-Cost
1519   /// pairs.
1520   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1521 
1522   /// A set containing all BasicBlocks that are known to be present after
1523   /// vectorization as predicated blocks.
1524   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1525 
1526   /// A map holding scalar costs for different vectorization factors. The
1527   /// presence of a cost for an instruction in the mapping indicates that the
1528   /// instruction will be scalarized when vectorizing with the associated
1529   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1530   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1531 
1532   /// Holds the instructions known to be uniform after vectorization.
1533   /// The data is collected per VF.
1534   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1535 
1536   /// Holds the instructions known to be scalar after vectorization.
1537   /// The data is collected per VF.
1538   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1539 
1540   /// Holds the instructions (address computations) that are forced to be
1541   /// scalarized.
1542   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1543 
1544   /// Returns the expected difference in cost from scalarizing the expression
1545   /// feeding a predicated instruction \p PredInst. The instructions to
1546   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1547   /// non-negative return value implies the expression will be scalarized.
1548   /// Currently, only single-use chains are considered for scalarization.
1549   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1550                               unsigned VF);
1551 
1552   /// Collect the instructions that are uniform after vectorization. An
1553   /// instruction is uniform if we represent it with a single scalar value in
1554   /// the vectorized loop corresponding to each vector iteration. Examples of
1555   /// uniform instructions include pointer operands of consecutive or
1556   /// interleaved memory accesses. Note that although uniformity implies an
1557   /// instruction will be scalar, the reverse is not true. In general, a
1558   /// scalarized instruction will be represented by VF scalar values in the
1559   /// vectorized loop, each corresponding to an iteration of the original
1560   /// scalar loop.
1561   void collectLoopUniforms(unsigned VF);
1562 
1563   /// Collect the instructions that are scalar after vectorization. An
1564   /// instruction is scalar if it is known to be uniform or will be scalarized
1565   /// during vectorization. Non-uniform scalarized instructions will be
1566   /// represented by VF values in the vectorized loop, each corresponding to an
1567   /// iteration of the original scalar loop.
1568   void collectLoopScalars(unsigned VF);
1569 
1570   /// Keeps the cost model's vectorization decision and cost for each
1571   /// instruction. Right now it is used for memory instructions only.
1572   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1573                                 std::pair<InstWidening, unsigned>>;
1574 
1575   DecisionList WideningDecisions;
1576 
1577 public:
1578   /// The loop that we evaluate.
1579   Loop *TheLoop;
1580 
1581   /// Predicated scalar evolution analysis.
1582   PredicatedScalarEvolution &PSE;
1583 
1584   /// Loop Info analysis.
1585   LoopInfo *LI;
1586 
1587   /// Vectorization legality.
1588   LoopVectorizationLegality *Legal;
1589 
1590   /// Vector target information.
1591   const TargetTransformInfo &TTI;
1592 
1593   /// Target Library Info.
1594   const TargetLibraryInfo *TLI;
1595 
1596   /// Demanded bits analysis.
1597   DemandedBits *DB;
1598 
1599   /// Assumption cache.
1600   AssumptionCache *AC;
1601 
1602   /// Interface to emit optimization remarks.
1603   OptimizationRemarkEmitter *ORE;
1604 
1605   const Function *TheFunction;
1606 
1607   /// Loop Vectorize Hint.
1608   const LoopVectorizeHints *Hints;
1609 
1610   /// The interleaved access information contains groups of interleaved
1611   /// accesses with the same stride that are close to each other.
1612   InterleavedAccessInfo &InterleaveInfo;
1613 
1614   /// Values to ignore in the cost model.
1615   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1616 
1617   /// Values to ignore in the cost model when VF > 1.
1618   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1619 };
1620 
1621 } // end namespace llvm
1622 
1623 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1624 // vectorization. The loop needs to be annotated with #pragma omp simd
1625 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1626 // vector length information is not provided, vectorization is not considered
1627 // explicit. Interleave hints are not allowed either. These limitations will be
1628 // relaxed in the future.
1629 // Please note that we are currently forced to abuse the pragma 'clang
1630 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1631 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1632 // provides *explicit vectorization hints* (LV can bypass legal checks and
1633 // assume that vectorization is legal). However, both hints are implemented
1634 // using the same metadata (llvm.loop.vectorize, processed by
1635 // LoopVectorizeHints). This will be fixed in the future when the native IR
1636 // representation for pragma 'omp simd' is introduced.
1637 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1638                                    OptimizationRemarkEmitter *ORE) {
1639   assert(!OuterLp->empty() && "This is not an outer loop");
1640   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1641 
1642   // Only outer loops with an explicit vectorization hint are supported.
1643   // Unannotated outer loops are ignored.
1644   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1645     return false;
1646 
1647   Function *Fn = OuterLp->getHeader()->getParent();
1648   if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) {
1649     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1650     return false;
1651   }
1652 
1653   if (!Hints.getWidth()) {
1654     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
1655     emitMissedWarning(Fn, OuterLp, Hints, ORE);
1656     return false;
1657   }
1658 
1659   if (Hints.getInterleave() > 1) {
1660     // TODO: Interleave support is future work.
1661     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1662                          "outer loops.\n");
1663     emitMissedWarning(Fn, OuterLp, Hints, ORE);
1664     return false;
1665   }
1666 
1667   return true;
1668 }
1669 
1670 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1671                                   OptimizationRemarkEmitter *ORE,
1672                                   SmallVectorImpl<Loop *> &V) {
1673   // Collect inner loops and outer loops without irreducible control flow. For
1674   // now, only collect outer loops that have explicit vectorization hints. If we
1675   // are stress testing the VPlan H-CFG construction, we collect the outermost
1676   // loop of every loop nest.
1677   if (L.empty() || VPlanBuildStressTest ||
1678       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1679     LoopBlocksRPO RPOT(&L);
1680     RPOT.perform(LI);
1681     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1682       V.push_back(&L);
1683       // TODO: Collect inner loops inside marked outer loops in case
1684       // vectorization fails for the outer loop. Do not invoke
1685       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1686       // already known to be reducible. We can use an inherited attribute for
1687       // that.
1688       return;
1689     }
1690   }
1691   for (Loop *InnerL : L)
1692     collectSupportedLoops(*InnerL, LI, ORE, V);
1693 }
1694 
1695 namespace {
1696 
1697 /// The LoopVectorize Pass.
1698 struct LoopVectorize : public FunctionPass {
1699   /// Pass identification, replacement for typeid
1700   static char ID;
1701 
1702   LoopVectorizePass Impl;
1703 
1704   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
1705       : FunctionPass(ID) {
1706     Impl.DisableUnrolling = NoUnrolling;
1707     Impl.AlwaysVectorize = AlwaysVectorize;
1708     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1709   }
1710 
1711   bool runOnFunction(Function &F) override {
1712     if (skipFunction(F))
1713       return false;
1714 
1715     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1716     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1717     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1718     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1719     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1720     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1721     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
1722     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1723     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1724     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1725     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1726     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1727 
1728     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1729         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1730 
1731     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1732                         GetLAA, *ORE);
1733   }
1734 
1735   void getAnalysisUsage(AnalysisUsage &AU) const override {
1736     AU.addRequired<AssumptionCacheTracker>();
1737     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1738     AU.addRequired<DominatorTreeWrapperPass>();
1739     AU.addRequired<LoopInfoWrapperPass>();
1740     AU.addRequired<ScalarEvolutionWrapperPass>();
1741     AU.addRequired<TargetTransformInfoWrapperPass>();
1742     AU.addRequired<AAResultsWrapperPass>();
1743     AU.addRequired<LoopAccessLegacyAnalysis>();
1744     AU.addRequired<DemandedBitsWrapperPass>();
1745     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1746     AU.addPreserved<LoopInfoWrapperPass>();
1747     AU.addPreserved<DominatorTreeWrapperPass>();
1748     AU.addPreserved<BasicAAWrapperPass>();
1749     AU.addPreserved<GlobalsAAWrapperPass>();
1750   }
1751 };
1752 
1753 } // end anonymous namespace
1754 
1755 //===----------------------------------------------------------------------===//
1756 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1757 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1758 //===----------------------------------------------------------------------===//
1759 
1760 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1761   // We need to place the broadcast of invariant variables outside the loop,
1762   // but only if it's proven safe to do so. Otherwise, the broadcast will be
1763   // placed inside the vector loop body.
1764   Instruction *Instr = dyn_cast<Instruction>(V);
1765   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1766                      (!Instr ||
1767                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1768   // Place the code for broadcasting invariant variables in the new preheader.
1769   IRBuilder<>::InsertPointGuard Guard(Builder);
1770   if (SafeToHoist)
1771     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1772 
1773   // Broadcast the scalar into all locations in the vector.
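  // For VF = 4 this typically expands to an insertelement of the scalar into
  // lane zero followed by a zero-mask shufflevector that replicates it across
  // all lanes (illustrative; the exact IR comes from CreateVectorSplat).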
1774   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1775 
1776   return Shuf;
1777 }
1778 
1779 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1780     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1781   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1782          "Expected either an induction phi-node or a truncate of it!");
1783   Value *Start = II.getStartValue();
1784 
1785   // Construct the initial value of the vector IV in the vector loop preheader
1786   auto CurrIP = Builder.saveIP();
1787   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1788   if (isa<TruncInst>(EntryVal)) {
1789     assert(Start->getType()->isIntegerTy() &&
1790            "Truncation requires an integer type");
1791     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1792     Step = Builder.CreateTrunc(Step, TruncType);
1793     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1794   }
1795   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1796   Value *SteppedStart =
1797       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1798 
1799   // We create vector phi nodes for both integer and floating-point induction
1800   // variables. Here, we determine the kind of arithmetic we will perform.
1801   Instruction::BinaryOps AddOp;
1802   Instruction::BinaryOps MulOp;
1803   if (Step->getType()->isIntegerTy()) {
1804     AddOp = Instruction::Add;
1805     MulOp = Instruction::Mul;
1806   } else {
1807     AddOp = II.getInductionOpcode();
1808     MulOp = Instruction::FMul;
1809   }
1810 
1811   // Multiply the vectorization factor by the step using integer or
1812   // floating-point arithmetic as appropriate.
1813   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1814   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1815 
1816   // Create a vector splat to use in the induction update.
1817   //
1818   // FIXME: If the step is non-constant, we create the vector splat with
1819   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1820   //        handle a constant vector splat.
1821   Value *SplatVF = isa<Constant>(Mul)
1822                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
1823                        : Builder.CreateVectorSplat(VF, Mul);
1824   Builder.restoreIP(CurrIP);
1825 
1826   // We may need to add the step a number of times, depending on the unroll
1827   // factor. The last of those goes into the PHI.
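  // For example, with UF = 2 this produces roughly:
  //   %vec.ind      = phi [ SteppedStart, pre ], [ %vec.ind.next, latch ]
  //   %step.add     = %vec.ind + SplatVF      ; vector IV for part 1
  //   %vec.ind.next = %step.add + SplatVF     ; moved to the latch below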
1828   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1829                                     &*LoopVectorBody->getFirstInsertionPt());
1830   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1831   Instruction *LastInduction = VecInd;
1832   for (unsigned Part = 0; Part < UF; ++Part) {
1833     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1834 
1835     if (isa<TruncInst>(EntryVal))
1836       addMetadata(LastInduction, EntryVal);
1837     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1838 
1839     LastInduction = cast<Instruction>(addFastMathFlag(
1840         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1841     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1842   }
1843 
1844   // Move the last step to the end of the latch block. This ensures consistent
1845   // placement of all induction updates.
1846   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1847   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1848   auto *ICmp = cast<Instruction>(Br->getCondition());
1849   LastInduction->moveBefore(ICmp);
1850   LastInduction->setName("vec.ind.next");
1851 
1852   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1853   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1854 }
1855 
1856 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1857   return Cost->isScalarAfterVectorization(I, VF) ||
1858          Cost->isProfitableToScalarize(I, VF);
1859 }
1860 
1861 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1862   if (shouldScalarizeInstruction(IV))
1863     return true;
1864   auto isScalarInst = [&](User *U) -> bool {
1865     auto *I = cast<Instruction>(U);
1866     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1867   };
1868   return llvm::any_of(IV->users(), isScalarInst);
1869 }
1870 
1871 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1872     const InductionDescriptor &ID, const Instruction *EntryVal,
1873     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1874   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1875          "Expected either an induction phi-node or a truncate of it!");
1876 
1877   // This induction variable is not the phi from the original loop but the
1878   // newly-created IV, based on the proof that the casted phi is equal to
1879   // the uncasted phi in the vectorized loop (possibly under a runtime
1880   // guard). It reuses the same InductionDescriptor that the original IV
1881   // uses, but we don't have to do any recording in this case - that is
1882   // done when the original IV is processed.
1883   if (isa<TruncInst>(EntryVal))
1884     return;
1885 
1886   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1887   if (Casts.empty())
1888     return;
1889   // Only the first Cast instruction in the Casts vector is of interest.
1890   // The rest of the Casts (if any exist) have no uses outside the
1891   // induction update chain itself.
1892   Instruction *CastInst = *Casts.begin();
1893   if (Lane < UINT_MAX)
1894     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1895   else
1896     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1897 }
1898 
1899 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1900   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1901          "Primary induction variable must have an integer type");
1902 
1903   auto II = Legal->getInductionVars()->find(IV);
1904   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
1905 
1906   auto ID = II->second;
1907   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1908 
1909   // The scalar value to broadcast. This will be derived from the canonical
1910   // induction variable.
1911   Value *ScalarIV = nullptr;
1912 
1913   // The value from the original loop to which we are mapping the new induction
1914   // variable.
1915   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1916 
1917   // True if we have vectorized the induction variable.
1918   auto VectorizedIV = false;
1919 
1920   // Determine if we want a scalar version of the induction variable. This is
1921   // true if the induction variable itself is not widened, or if it has at
1922   // least one user in the loop that is not widened.
1923   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
1924 
1925   // Generate code for the induction step. Note that induction steps are
1926   // required to be loop-invariant.
1927   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
1928          "Induction step should be loop invariant");
1929   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1930   Value *Step = nullptr;
1931   if (PSE.getSE()->isSCEVable(IV->getType())) {
1932     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1933     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
1934                              LoopVectorPreHeader->getTerminator());
1935   } else {
1936     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
1937   }
1938 
1939   // Try to create a new independent vector induction variable. If we can't
1940   // create the phi node, we will splat the scalar induction variable in each
1941   // loop iteration.
1942   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
1943     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1944     VectorizedIV = true;
1945   }
1946 
1947   // If we haven't yet vectorized the induction variable, or if we will create
1948   // a scalar one, we need to define the scalar induction variable and step
1949   // values. If we were given a truncation type, truncate the canonical
1950   // induction variable and step. Otherwise, derive these values from the
1951   // induction descriptor.
1952   if (!VectorizedIV || NeedsScalarIV) {
1953     ScalarIV = Induction;
1954     if (IV != OldInduction) {
1955       ScalarIV = IV->getType()->isIntegerTy()
1956                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1957                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1958                                           IV->getType());
1959       ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
1960       ScalarIV->setName("offset.idx");
1961     }
1962     if (Trunc) {
1963       auto *TruncType = cast<IntegerType>(Trunc->getType());
1964       assert(Step->getType()->isIntegerTy() &&
1965              "Truncation requires an integer step");
1966       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1967       Step = Builder.CreateTrunc(Step, TruncType);
1968     }
1969   }
1970 
1971   // If we haven't yet vectorized the induction variable, splat the scalar
1972   // induction variable, and build the necessary step vectors.
1973   // TODO: Don't do it unless the vectorized IV is really required.
1974   if (!VectorizedIV) {
1975     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1976     for (unsigned Part = 0; Part < UF; ++Part) {
1977       Value *EntryPart =
1978           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1979       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1980       if (Trunc)
1981         addMetadata(EntryPart, Trunc);
1982       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1983     }
1984   }
1985 
1986   // If an induction variable is only used for counting loop iterations or
1987   // calculating addresses, it doesn't need to be widened. Create scalar steps
1988   // that can be used by instructions we will later scalarize. Note that the
1989   // addition of the scalar steps will not increase the number of instructions
1990   // in the loop in the common case prior to InstCombine. We will be trading
1991   // one vector extract for each scalar step.
1992   if (NeedsScalarIV)
1993     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1994 }
1995 
1996 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1997                                           Instruction::BinaryOps BinOp) {
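  // The result is Val + Step * <StartIdx, ..., StartIdx + VLen - 1>, e.g.
  // getStepVector(<x, x, x, x>, 2, 1) yields <x+2, x+3, x+4, x+5>. FP
  // inductions use BinOp (FAdd or FSub) instead of Add.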
1998   // Create and check the types.
1999   assert(Val->getType()->isVectorTy() && "Must be a vector");
2000   int VLen = Val->getType()->getVectorNumElements();
2001 
2002   Type *STy = Val->getType()->getScalarType();
2003   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2004          "Induction Step must be an integer or FP");
2005   assert(Step->getType() == STy && "Step has wrong type");
2006 
2007   SmallVector<Constant *, 8> Indices;
2008 
2009   if (STy->isIntegerTy()) {
2010     // Create a vector of VLen consecutive numbers starting at StartIdx.
2011     for (int i = 0; i < VLen; ++i)
2012       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2013 
2014     // Add the consecutive indices to the vector value.
2015     Constant *Cv = ConstantVector::get(Indices);
2016     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2017     Step = Builder.CreateVectorSplat(VLen, Step);
2018     assert(Step->getType() == Val->getType() && "Invalid step vec");
2019     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2020     // which can be found from the original scalar operations.
2021     Step = Builder.CreateMul(Cv, Step);
2022     return Builder.CreateAdd(Val, Step, "induction");
2023   }
2024 
2025   // Floating point induction.
2026   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2027          "Binary Opcode should be specified for FP induction");
2028   // Create a vector of VLen consecutive numbers starting at StartIdx.
2029   for (int i = 0; i < VLen; ++i)
2030     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2031 
2032   // Add the consecutive indices to the vector value.
2033   Constant *Cv = ConstantVector::get(Indices);
2034 
2035   Step = Builder.CreateVectorSplat(VLen, Step);
2036 
2037   // Floating point operations had to be 'fast' to enable the induction.
2038   FastMathFlags Flags;
2039   Flags.setFast();
2040 
2041   Value *MulOp = Builder.CreateFMul(Cv, Step);
2042   if (isa<Instruction>(MulOp))
2043     // Have to check; MulOp may have been folded to a constant.
2044     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2045 
2046   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2047   if (isa<Instruction>(BOp))
2048     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2049   return BOp;
2050 }
2051 
2052 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2053                                            Instruction *EntryVal,
2054                                            const InductionDescriptor &ID) {
2055   // We shouldn't have to build scalar steps if we aren't vectorizing.
2056   assert(VF > 1 && "VF should be greater than one");
2057 
2058   // Get the value type and ensure it and the step have the same type.
2059   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2060   assert(ScalarIVTy == Step->getType() &&
2061          "Val and Step should have the same type");
2062 
2063   // We build scalar steps for both integer and floating-point induction
2064   // variables. Here, we determine the kind of arithmetic we will perform.
2065   Instruction::BinaryOps AddOp;
2066   Instruction::BinaryOps MulOp;
2067   if (ScalarIVTy->isIntegerTy()) {
2068     AddOp = Instruction::Add;
2069     MulOp = Instruction::Mul;
2070   } else {
2071     AddOp = ID.getInductionOpcode();
2072     MulOp = Instruction::FMul;
2073   }
2074 
2075   // Determine the number of scalars we need to generate for each unroll
2076   // iteration. If EntryVal is uniform, we only need to generate the first
2077   // lane. Otherwise, we generate all VF values.
2078   unsigned Lanes =
2079       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2080                                                                          : VF;
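  // Each scalar step evaluates to ScalarIV + (VF * Part + Lane) * Step (via
  // AddOp/MulOp for floating-point inductions); e.g. with VF = 4, part 1
  // lane 2 receives ScalarIV + 6 * Step.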
2081   // Compute the scalar steps and save the results in VectorLoopValueMap.
2082   for (unsigned Part = 0; Part < UF; ++Part) {
2083     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2084       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2085       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2086       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2087       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2088       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2089     }
2090   }
2091 }
2092 
2093 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2094   assert(V != Induction && "The new induction variable should not be used.");
2095   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2096   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2097 
2098   // If we have a stride that is replaced by one, do it here.
2099   if (Legal->hasStride(V))
2100     V = ConstantInt::get(V->getType(), 1);
2101 
2102   // If we have a vector mapped to this value, return it.
2103   if (VectorLoopValueMap.hasVectorValue(V, Part))
2104     return VectorLoopValueMap.getVectorValue(V, Part);
2105 
2106   // If the value has not been vectorized, check if it has been scalarized
2107   // instead. If it has been scalarized, and we actually need the value in
2108   // vector form, we will construct the vector values on demand.
2109   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2110     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2111 
2112     // If we've scalarized a value, that value should be an instruction.
2113     auto *I = cast<Instruction>(V);
2114 
2115     // If we aren't vectorizing, we can just copy the scalar map values over to
2116     // the vector map.
2117     if (VF == 1) {
2118       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2119       return ScalarValue;
2120     }
2121 
2122     // Get the last scalar instruction we generated for V and Part. If the value
2123     // is known to be uniform after vectorization, this corresponds to lane zero
2124     // of the Part unroll iteration. Otherwise, the last instruction is the one
2125     // we created for the last vector lane of the Part unroll iteration.
2126     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2127     auto *LastInst = cast<Instruction>(
2128         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2129 
2130     // Set the insert point after the last scalarized instruction. This ensures
2131     // the insertelement sequence will directly follow the scalar definitions.
2132     auto OldIP = Builder.saveIP();
2133     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2134     Builder.SetInsertPoint(&*NewIP);
2135 
2136     // However, if we are vectorizing, we need to construct the vector values.
2137     // If the value is known to be uniform after vectorization, we can just
2138     // broadcast the scalar value corresponding to lane zero for each unroll
2139     // iteration. Otherwise, we construct the vector values using insertelement
2140     // instructions. Since the resulting vectors are stored in
2141     // VectorLoopValueMap, we will only generate the insertelements once.
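    // For example, packing a non-uniform value for VF = 4 builds roughly:
    //   %p0 = insertelement <4 x T> undef, T %s0, i32 0
    //   %p1 = insertelement <4 x T> %p0,   T %s1, i32 1
    //   ... and so on up to lane 3.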
2142     Value *VectorValue = nullptr;
2143     if (Cost->isUniformAfterVectorization(I, VF)) {
2144       VectorValue = getBroadcastInstrs(ScalarValue);
2145       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2146     } else {
2147       // Initialize packing with insertelements to start from undef.
2148       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2149       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2150       for (unsigned Lane = 0; Lane < VF; ++Lane)
2151         packScalarIntoVectorValue(V, {Part, Lane});
2152       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2153     }
2154     Builder.restoreIP(OldIP);
2155     return VectorValue;
2156   }
2157 
2158   // If this scalar is unknown, assume that it is a constant or that it is
2159   // loop invariant. Broadcast V and save the value for future uses.
2160   Value *B = getBroadcastInstrs(V);
2161   VectorLoopValueMap.setVectorValue(V, Part, B);
2162   return B;
2163 }
2164 
2165 Value *
2166 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2167                                             const VPIteration &Instance) {
2168   // If the value is not an instruction contained in the loop, it should
2169   // already be scalar.
2170   if (OrigLoop->isLoopInvariant(V))
2171     return V;
2172 
2173   assert(Instance.Lane > 0
2174              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2175              : true && "Uniform values only have lane zero");
2176 
2177   // If the value from the original loop has not been vectorized, it is
2178   // represented by UF x VF scalar values in the new loop. Return the requested
2179   // scalar value.
2180   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2181     return VectorLoopValueMap.getScalarValue(V, Instance);
2182 
2183   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2184   // for the given unroll part. If this entry is not a vector type (i.e., the
2185   // vectorization factor is one), there is no need to generate an
2186   // extractelement instruction.
2187   auto *U = getOrCreateVectorValue(V, Instance.Part);
2188   if (!U->getType()->isVectorTy()) {
2189     assert(VF == 1 && "Value not scalarized has non-vector type");
2190     return U;
2191   }
2192 
2193   // Otherwise, the value from the original loop has been vectorized and is
2194   // represented by UF vector values. Extract and return the requested scalar
2195   // value from the appropriate vector lane.
2196   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2197 }
2198 
2199 void InnerLoopVectorizer::packScalarIntoVectorValue(
2200     Value *V, const VPIteration &Instance) {
2201   assert(V != Induction && "The new induction variable should not be used.");
2202   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2203   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2204 
2205   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2206   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2207   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2208                                             Builder.getInt32(Instance.Lane));
2209   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2210 }
2211 
2212 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2213   assert(Vec->getType()->isVectorTy() && "Invalid type");
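  // Build the reverse shuffle mask <VF-1, VF-2, ..., 0>, e.g. <3, 2, 1, 0> for
  // VF = 4.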
2214   SmallVector<Constant *, 8> ShuffleMask;
2215   for (unsigned i = 0; i < VF; ++i)
2216     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2217 
2218   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2219                                      ConstantVector::get(ShuffleMask),
2220                                      "reverse");
2221 }
2222 
2223 // Try to vectorize the interleave group that \p Instr belongs to.
2224 //
2225 // E.g. Translate following interleaved load group (factor = 3):
2226 //   for (i = 0; i < N; i+=3) {
2227 //     R = Pic[i];             // Member of index 0
2228 //     G = Pic[i+1];           // Member of index 1
2229 //     B = Pic[i+2];           // Member of index 2
2230 //     ... // do something to R, G, B
2231 //   }
2232 // To:
2233 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2234 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2235 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2236 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2237 //
2238 // Or translate following interleaved store group (factor = 3):
2239 //   for (i = 0; i < N; i+=3) {
2240 //     ... do something to R, G, B
2241 //     Pic[i]   = R;           // Member of index 0
2242 //     Pic[i+1] = G;           // Member of index 1
2243 //     Pic[i+2] = B;           // Member of index 2
2244 //   }
2245 // To:
2246 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2247 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2248 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2249 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2250 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2251 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2252   const InterleaveGroup *Group = Cost->getInterleavedAccessGroup(Instr);
2253   assert(Group && "Fail to get an interleaved access group.");
2254 
2255   // Skip if current instruction is not the insert position.
2256   if (Instr != Group->getInsertPos())
2257     return;
2258 
2259   const DataLayout &DL = Instr->getModule()->getDataLayout();
2260   Value *Ptr = getLoadStorePointerOperand(Instr);
2261 
2262   // Prepare for the vector type of the interleaved load/store.
2263   Type *ScalarTy = getMemInstValueType(Instr);
2264   unsigned InterleaveFactor = Group->getFactor();
2265   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2266   Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2267 
2268   // Prepare for the new pointers.
2269   setDebugLocFromInst(Builder, Ptr);
2270   SmallVector<Value *, 2> NewPtrs;
2271   unsigned Index = Group->getIndex(Instr);
2272 
2273   // If the group is reverse, adjust the index to refer to the last vector lane
2274   // instead of the first. We adjust the index from the first vector lane,
2275   // rather than directly getting the pointer for lane VF - 1, because the
2276   // pointer operand of the interleaved access is supposed to be uniform. For
2277   // uniform instructions, we're only required to generate a value for the
2278   // first vector lane in each unroll iteration.
2279   if (Group->isReverse())
2280     Index += (VF - 1) * Group->getFactor();
2281 
2282   bool InBounds = false;
2283   if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2284     InBounds = gep->isInBounds();
2285 
2286   for (unsigned Part = 0; Part < UF; Part++) {
2287     Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2288 
2289     // Note that the current instruction could be at any member index. We
2290     // need to adjust the address to point at the member of index 0.
2291     //
2292     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2293     //       b = A[i];       // Member of index 0
2294     // The current pointer points to A[i+1]; adjust it to A[i].
2295     //
2296     // E.g.  A[i+1] = a;     // Member of index 1
2297     //       A[i]   = b;     // Member of index 0
2298     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2299     // The current pointer points to A[i+2]; adjust it to A[i].
2300     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2301     if (InBounds)
2302       cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true);
2303 
2304     // Cast to the vector pointer type.
2305     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2306   }
2307 
2308   setDebugLocFromInst(Builder, Instr);
2309   Value *UndefVec = UndefValue::get(VecTy);
2310 
2311   // Vectorize the interleaved load group.
2312   if (isa<LoadInst>(Instr)) {
2313     // For each unroll part, create a wide load for the group.
2314     SmallVector<Value *, 2> NewLoads;
2315     for (unsigned Part = 0; Part < UF; Part++) {
2316       auto *NewLoad = Builder.CreateAlignedLoad(
2317           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2318       Group->addMetadata(NewLoad);
2319       NewLoads.push_back(NewLoad);
2320     }
2321 
2322     // For each member in the group, shuffle out the appropriate data from the
2323     // wide loads.
2324     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2325       Instruction *Member = Group->getMember(I);
2326 
2327       // Skip the gaps in the group.
2328       if (!Member)
2329         continue;
2330 
2331       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2332       for (unsigned Part = 0; Part < UF; Part++) {
2333         Value *StridedVec = Builder.CreateShuffleVector(
2334             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2335 
2336         // If this member has a different type, cast the result to it.
2337         if (Member->getType() != ScalarTy) {
2338           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2339           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2340         }
2341 
2342         if (Group->isReverse())
2343           StridedVec = reverseVector(StridedVec);
2344 
2345         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2346       }
2347     }
2348     return;
2349   }
2350 
2351   // The subvector type for the current instruction.
2352   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2353 
2354   // Vectorize the interleaved store group.
2355   for (unsigned Part = 0; Part < UF; Part++) {
2356     // Collect the stored vector from each member.
2357     SmallVector<Value *, 4> StoredVecs;
2358     for (unsigned i = 0; i < InterleaveFactor; i++) {
2359       // An interleaved store group doesn't allow gaps; each index has a member.
2360       Instruction *Member = Group->getMember(i);
2361       assert(Member && "Fail to get a member from an interleaved store group");
2362 
2363       Value *StoredVec = getOrCreateVectorValue(
2364           cast<StoreInst>(Member)->getValueOperand(), Part);
2365       if (Group->isReverse())
2366         StoredVec = reverseVector(StoredVec);
2367 
2368       // If this member has a different type, cast it to a unified type.
2370       if (StoredVec->getType() != SubVT)
2371         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2372 
2373       StoredVecs.push_back(StoredVec);
2374     }
2375 
2376     // Concatenate all vectors into a wide vector.
2377     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2378 
2379     // Interleave the elements in the wide vector.
2380     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2381     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2382                                               "interleaved.vec");
2383 
2384     Instruction *NewStoreInstr =
2385         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2386 
2387     Group->addMetadata(NewStoreInstr);
2388   }
2389 }
2390 
2391 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2392                                                      VectorParts *BlockInMask) {
2393   // Attempt to issue a wide load.
2394   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2395   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2396 
2397   assert((LI || SI) && "Invalid Load/Store instruction");
2398 
2399   LoopVectorizationCostModel::InstWidening Decision =
2400       Cost->getWideningDecision(Instr, VF);
2401   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2402          "CM decision should be taken at this point");
2403   if (Decision == LoopVectorizationCostModel::CM_Interleave)
2404     return vectorizeInterleaveGroup(Instr);
2405 
2406   Type *ScalarDataTy = getMemInstValueType(Instr);
2407   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2408   Value *Ptr = getLoadStorePointerOperand(Instr);
2409   unsigned Alignment = getMemInstAlignment(Instr);
2410   // An alignment of 0 means target ABI alignment. We need to use the scalar's
2411   // target ABI alignment in such a case.
2412   const DataLayout &DL = Instr->getModule()->getDataLayout();
2413   if (!Alignment)
2414     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2415   unsigned AddressSpace = getMemInstAddressSpace(Instr);
2416 
2417   // Determine if the pointer operand of the access is either consecutive or
2418   // reverse consecutive.
2419   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2420   bool ConsecutiveStride =
2421       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2422   bool CreateGatherScatter =
2423       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2424 
2425   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2426   // gather/scatter. Otherwise Decision should have been to Scalarize.
2427   assert((ConsecutiveStride || CreateGatherScatter) &&
2428          "The instruction should be scalarized");
2429 
2430   // Handle consecutive loads/stores.
2431   if (ConsecutiveStride)
2432     Ptr = getOrCreateScalarValue(Ptr, {0, 0});
2433 
2434   VectorParts Mask;
2435   bool isMaskRequired = BlockInMask;
2436   if (isMaskRequired)
2437     Mask = *BlockInMask;
2438 
2439   bool InBounds = false;
2440   if (auto *gep = dyn_cast<GetElementPtrInst>(
2441           getLoadStorePointerOperand(Instr)->stripPointerCasts()))
2442     InBounds = gep->isInBounds();
2443 
2444   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2445     // Calculate the pointer for the specific unroll-part.
2446     GetElementPtrInst *PartPtr = nullptr;
2447 
2448     if (Reverse) {
2449       // If the address is consecutive but reversed, then the
2450       // wide store needs to start at the last vector element.
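      // E.g. for VF = 4 and Part = 0 the two GEPs below address Ptr[-3 .. 0];
      // the loaded or stored vector itself is reversed separately.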
2451       PartPtr = cast<GetElementPtrInst>(
2452           Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF)));
2453       PartPtr->setIsInBounds(InBounds);
2454       PartPtr = cast<GetElementPtrInst>(
2455           Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF)));
2456       PartPtr->setIsInBounds(InBounds);
2457       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2458         Mask[Part] = reverseVector(Mask[Part]);
2459     } else {
2460       PartPtr = cast<GetElementPtrInst>(
2461           Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF)));
2462       PartPtr->setIsInBounds(InBounds);
2463     }
2464 
2465     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2466   };
2467 
2468   // Handle Stores:
2469   if (SI) {
2470     setDebugLocFromInst(Builder, SI);
2471 
2472     for (unsigned Part = 0; Part < UF; ++Part) {
2473       Instruction *NewSI = nullptr;
2474       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
2475       if (CreateGatherScatter) {
2476         Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2477         Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2478         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2479                                             MaskPart);
2480       } else {
2481         if (Reverse) {
2482           // If we store to reverse consecutive memory locations, then we need
2483           // to reverse the order of elements in the stored value.
2484           StoredVal = reverseVector(StoredVal);
2485           // We don't want to update the value in the map as it might be used in
2486           // another expression. So don't call resetVectorValue(StoredVal).
2487         }
2488         auto *VecPtr = CreateVecPtr(Part, Ptr);
2489         if (isMaskRequired)
2490           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2491                                             Mask[Part]);
2492         else
2493           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2494       }
2495       addMetadata(NewSI, SI);
2496     }
2497     return;
2498   }
2499 
2500   // Handle loads.
2501   assert(LI && "Must have a load instruction");
2502   setDebugLocFromInst(Builder, LI);
2503   for (unsigned Part = 0; Part < UF; ++Part) {
2504     Value *NewLI;
2505     if (CreateGatherScatter) {
2506       Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2507       Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2508       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2509                                          nullptr, "wide.masked.gather");
2510       addMetadata(NewLI, LI);
2511     } else {
2512       auto *VecPtr = CreateVecPtr(Part, Ptr);
2513       if (isMaskRequired)
2514         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
2515                                          UndefValue::get(DataTy),
2516                                          "wide.masked.load");
2517       else
2518         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
2519 
2520       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2521       addMetadata(NewLI, LI);
2522       if (Reverse)
2523         NewLI = reverseVector(NewLI);
2524     }
2525     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2526   }
2527 }
2528 
2529 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2530                                                const VPIteration &Instance,
2531                                                bool IfPredicateInstr) {
2532   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2533 
2534   setDebugLocFromInst(Builder, Instr);
2535 
  // Does this instruction return a value?
2537   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2538 
2539   Instruction *Cloned = Instr->clone();
2540   if (!IsVoidRetTy)
2541     Cloned->setName(Instr->getName() + ".cloned");
2542 
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
2545   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2546     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2547     Cloned->setOperand(op, NewOp);
2548   }
2549   addNewMetadata(Cloned, Instr);
2550 
2551   // Place the cloned scalar in the new loop.
2552   Builder.Insert(Cloned);
2553 
2554   // Add the cloned scalar to the scalar map entry.
2555   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2556 
  // If we just cloned a new assumption, add it to the assumption cache.
2558   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2559     if (II->getIntrinsicID() == Intrinsic::assume)
2560       AC->registerAssumption(II);
2561 
2562   // End if-block.
2563   if (IfPredicateInstr)
2564     PredicatedInstructions.push_back(Cloned);
2565 }
2566 
2567 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2568                                                       Value *End, Value *Step,
2569                                                       Instruction *DL) {
2570   BasicBlock *Header = L->getHeader();
2571   BasicBlock *Latch = L->getLoopLatch();
2572   // As we're just creating this loop, it's possible no latch exists
2573   // yet. If so, use the header as this will be a single block loop.
2574   if (!Latch)
2575     Latch = Header;
2576 
2577   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2578   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2579   setDebugLocFromInst(Builder, OldInst);
2580   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2581 
2582   Builder.SetInsertPoint(Latch->getTerminator());
2583   setDebugLocFromInst(Builder, OldInst);
2584 
2585   // Create i+1 and fill the PHINode.
2586   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2587   Induction->addIncoming(Start, L->getLoopPreheader());
2588   Induction->addIncoming(Next, Latch);
2589   // Create the compare.
2590   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2591   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2592 
2593   // Now we have two terminators. Remove the old one from the block.
2594   Latch->getTerminator()->eraseFromParent();
2595 
2596   return Induction;
2597 }
2598 
2599 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2600   if (TripCount)
2601     return TripCount;
2602 
2603   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2604   // Find the loop boundaries.
2605   ScalarEvolution *SE = PSE.getSE();
2606   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2607   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2608          "Invalid loop count");
2609 
2610   Type *IdxTy = Legal->getWidestInductionType();
2611 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is if
  // the induction variable was signed, and as such it will not overflow, so
  // the truncation is legal.
2617   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2618       IdxTy->getPrimitiveSizeInBits())
2619     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2620   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2621 
2622   // Get the total trip count from the count by adding 1.
2623   const SCEV *ExitCount = SE->getAddExpr(
2624       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2625 
2626   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2627 
  // Expand the trip count and place the new instructions in the preheader.
  // Note that only the preheader is modified here; the loop body is untouched.
2630   SCEVExpander Exp(*SE, DL, "induction");
2631 
2632   // Count holds the overall loop count (N).
2633   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2634                                 L->getLoopPreheader()->getTerminator());
2635 
2636   if (TripCount->getType()->isPointerTy())
2637     TripCount =
2638         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2639                                     L->getLoopPreheader()->getTerminator());
2640 
2641   return TripCount;
2642 }
2643 
2644 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2645   if (VectorTripCount)
2646     return VectorTripCount;
2647 
2648   Value *TC = getOrCreateTripCount(L);
2649   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2650 
2651   // Now we need to generate the expression for the part of the loop that the
2652   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2653   // iterations are not required for correctness, or N - Step, otherwise. Step
2654   // is equal to the vectorization factor (number of SIMD elements) times the
2655   // unroll factor (number of SIMD instructions).
2656   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2657   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2658 
2659   // If there is a non-reversed interleaved group that may speculatively access
2660   // memory out-of-bounds, we need to ensure that there will be at least one
2661   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2662   // the trip count, we set the remainder to be equal to the step. If the step
2663   // does not evenly divide the trip count, no adjustment is necessary since
2664   // there will already be scalar iterations. Note that the minimum iterations
2665   // check ensures that N >= Step.
2666   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2667     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2668     R = Builder.CreateSelect(IsZero, Step, R);
2669   }
2670 
2671   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
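  // Illustrative example (assumed values): with TC = 10, VF = 4 and UF = 1,
  // Step = 4 and R = 10 urem 4 = 2, so the vector trip count is 10 - 2 = 8 and
  // the scalar epilogue runs the remaining 2 iterations. If TC = 8 and a
  // scalar epilogue is required, R is bumped from 0 up to 4, giving a vector
  // trip count of 4 and leaving 4 iterations for the epilogue.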
2672 
2673   return VectorTripCount;
2674 }
2675 
2676 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2677                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
2679   unsigned VF = DstVTy->getNumElements();
2680   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2681   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2682   Type *SrcElemTy = SrcVecTy->getElementType();
2683   Type *DstElemTy = DstVTy->getElementType();
2684   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2685          "Vector elements must have same size");
2686 
2687   // Do a direct cast if element types are castable.
2688   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2689     return Builder.CreateBitOrPointerCast(V, DstVTy);
2690   }
  // V cannot be cast directly to the desired vector type. This may happen when
  // V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast using an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
2695   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2696          "Only one type should be a pointer type");
2697   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2698          "Only one type should be a floating point type");
2699   Type *IntTy =
2700       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2701   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2702   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2703   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2704 }
2705 
2706 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2707                                                          BasicBlock *Bypass) {
2708   Value *Count = getOrCreateTripCount(L);
2709   BasicBlock *BB = L->getLoopPreheader();
2710   IRBuilder<> Builder(BB->getTerminator());
2711 
2712   // Generate code to check if the loop's trip count is less than VF * UF, or
2713   // equal to it in case a scalar epilogue is required; this implies that the
2714   // vector trip count is zero. This check also covers the case where adding one
2715   // to the backedge-taken count overflowed leading to an incorrect trip count
2716   // of zero. In this case we will also jump to the scalar loop.
2717   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2718                                           : ICmpInst::ICMP_ULT;
2719   Value *CheckMinIters = Builder.CreateICmp(
2720       P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
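  // For instance (illustrative values): with VF = 4 and UF = 2 the constant is
  // 8; a trip count of 7 (or of 8 when a scalar epilogue is required) takes
  // the bypass branch to the scalar loop, since the vector loop would
  // otherwise execute zero iterations.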
2721 
2722   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2723   // Update dominator tree immediately if the generated block is a
2724   // LoopBypassBlock because SCEV expansions to generate loop bypass
2725   // checks may query it before the current function is finished.
2726   DT->addNewBlock(NewBB, BB);
2727   if (L->getParentLoop())
2728     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2729   ReplaceInstWithInst(BB->getTerminator(),
2730                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
2731   LoopBypassBlocks.push_back(BB);
2732 }
2733 
2734 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2735   BasicBlock *BB = L->getLoopPreheader();
2736 
  // Generate the code to check the SCEV assumptions that we made.
2738   // We want the new basic block to start at the first instruction in a
2739   // sequence of instructions that form a check.
2740   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2741                    "scev.check");
2742   Value *SCEVCheck =
2743       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2744 
2745   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2746     if (C->isZero())
2747       return;
2748 
2749   // Create a new block containing the stride check.
2750   BB->setName("vector.scevcheck");
2751   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2752   // Update dominator tree immediately if the generated block is a
2753   // LoopBypassBlock because SCEV expansions to generate loop bypass
2754   // checks may query it before the current function is finished.
2755   DT->addNewBlock(NewBB, BB);
2756   if (L->getParentLoop())
2757     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2758   ReplaceInstWithInst(BB->getTerminator(),
2759                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
2760   LoopBypassBlocks.push_back(BB);
2761   AddedSafetyChecks = true;
2762 }
2763 
2764 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2765   BasicBlock *BB = L->getLoopPreheader();
2766 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
2770   Instruction *FirstCheckInst;
2771   Instruction *MemRuntimeCheck;
2772   std::tie(FirstCheckInst, MemRuntimeCheck) =
2773       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
2774   if (!MemRuntimeCheck)
2775     return;
2776 
2777   // Create a new block containing the memory check.
2778   BB->setName("vector.memcheck");
2779   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2780   // Update dominator tree immediately if the generated block is a
2781   // LoopBypassBlock because SCEV expansions to generate loop bypass
2782   // checks may query it before the current function is finished.
2783   DT->addNewBlock(NewBB, BB);
2784   if (L->getParentLoop())
2785     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2786   ReplaceInstWithInst(BB->getTerminator(),
2787                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
2788   LoopBypassBlocks.push_back(BB);
2789   AddedSafetyChecks = true;
2790 
2791   // We currently don't use LoopVersioning for the actual loop cloning but we
2792   // still use it to add the noalias metadata.
2793   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2794                                            PSE.getSE());
2795   LVer->prepareNoAliasMetadata();
2796 }
2797 
2798 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2799   /*
2800    In this function we generate a new loop. The new loop will contain
2801    the vectorized instructions while the old loop will continue to run the
2802    scalar remainder.
2803 
2804        [ ] <-- loop iteration number check.
2805     /   |
2806    /    v
2807   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2808   |  /  |
2809   | /   v
2810   ||   [ ]     <-- vector pre header.
2811   |/    |
2812   |     v
2813   |    [  ] \
2814   |    [  ]_|   <-- vector loop.
2815   |     |
2816   |     v
2817   |   -[ ]   <--- middle-block.
2818   |  /  |
2819   | /   v
2820   -|- >[ ]     <--- new preheader.
2821    |    |
2822    |    v
2823    |   [ ] \
2824    |   [ ]_|   <-- old scalar loop to handle remainder.
2825     \   |
2826      \  v
2827       >[ ]     <-- exit block.
2828    ...
2829    */
2830 
2831   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
2832   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2833   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
2834   assert(VectorPH && "Invalid loop structure");
2835   assert(ExitBlock && "Must have an exit block");
2836 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterator loops, which often have multiple
  // pointer induction variables. In the code below we also support a case
  // where we don't have a single induction variable.
2841   //
2842   // We try to obtain an induction variable from the original loop as hard
2843   // as possible. However if we don't find one that:
2844   //   - is an integer
2845   //   - counts from zero, stepping by one
2846   //   - is the size of the widest induction variable type
2847   // then we create a new one.
2848   OldInduction = Legal->getPrimaryInduction();
2849   Type *IdxTy = Legal->getWidestInductionType();
2850 
2851   // Split the single block loop into the two loop structure described above.
2852   BasicBlock *VecBody =
2853       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
2854   BasicBlock *MiddleBlock =
2855       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
2856   BasicBlock *ScalarPH =
2857       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
2858 
2859   // Create and register the new vector loop.
2860   Loop *Lp = LI->AllocateLoop();
2861   Loop *ParentLoop = OrigLoop->getParentLoop();
2862 
2863   // Insert the new loop into the loop nest and register the new basic blocks
2864   // before calling any utilities such as SCEV that require valid LoopInfo.
2865   if (ParentLoop) {
2866     ParentLoop->addChildLoop(Lp);
2867     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
2868     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
2869   } else {
2870     LI->addTopLevelLoop(Lp);
2871   }
2872   Lp->addBasicBlockToLoop(VecBody, *LI);
2873 
2874   // Find the loop boundaries.
2875   Value *Count = getOrCreateTripCount(Lp);
2876 
2877   Value *StartIdx = ConstantInt::get(IdxTy, 0);
2878 
2879   // Now, compare the new count to zero. If it is zero skip the vector loop and
2880   // jump to the scalar loop. This check also covers the case where the
2881   // backedge-taken count is uint##_max: adding one to it will overflow leading
2882   // to an incorrect trip count of zero. In this (rare) case we will also jump
2883   // to the scalar loop.
2884   emitMinimumIterationCountCheck(Lp, ScalarPH);
2885 
2886   // Generate the code to check any assumptions that we've made for SCEV
2887   // expressions.
2888   emitSCEVChecks(Lp, ScalarPH);
2889 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
2893   emitMemRuntimeChecks(Lp, ScalarPH);
2894 
2895   // Generate the induction variable.
2896   // The loop step is equal to the vectorization factor (num of SIMD elements)
2897   // times the unroll factor (num of SIMD instructions).
2898   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
2899   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
2900   Induction =
2901       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
2902                               getDebugLocFromInstOrOperands(OldInduction));
2903 
2904   // We are going to resume the execution of the scalar loop.
2905   // Go over all of the induction variables that we found and fix the
2906   // PHIs that are left in the scalar version of the loop.
2907   // The starting values of PHI nodes depend on the counter of the last
2908   // iteration in the vectorized loop.
2909   // If we come from a bypass edge then we need to start from the original
2910   // start value.
2911 
2912   // This variable saves the new starting index for the scalar loop. It is used
2913   // to test if there are any tail iterations left once the vector loop has
2914   // completed.
2915   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
2916   for (auto &InductionEntry : *List) {
2917     PHINode *OrigPhi = InductionEntry.first;
2918     InductionDescriptor II = InductionEntry.second;
2919 
    // Create phi nodes to merge from the backedge-taken check block.
2921     PHINode *BCResumeVal = PHINode::Create(
2922         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
2923     // Copy original phi DL over to the new one.
2924     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
2925     Value *&EndValue = IVEndValues[OrigPhi];
2926     if (OrigPhi == OldInduction) {
2927       // We know what the end value is.
2928       EndValue = CountRoundDown;
2929     } else {
2930       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
2931       Type *StepType = II.getStep()->getType();
2932       Instruction::CastOps CastOp =
2933         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
2934       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
2935       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2936       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
2937       EndValue->setName("ind.end");
2938     }
2939 
2940     // The new PHI merges the original incoming value, in case of a bypass,
2941     // or the value at the end of the vectorized loop.
2942     BCResumeVal->addIncoming(EndValue, MiddleBlock);
2943 
2944     // Fix the scalar body counter (PHI node).
2945     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
2946 
2947     // The old induction's phi node in the scalar body needs the truncated
2948     // value.
2949     for (BasicBlock *BB : LoopBypassBlocks)
2950       BCResumeVal->addIncoming(II.getStartValue(), BB);
2951     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
2952   }
2953 
2954   // Add a check in the middle block to see if we have completed
2955   // all of the iterations in the first vector loop.
2956   // If (N - N%VF) == N, then we *don't* need to run the remainder.
2957   Value *CmpN =
2958       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
2959                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
2960   ReplaceInstWithInst(MiddleBlock->getTerminator(),
2961                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
2962 
2963   // Get ready to start creating new instructions into the vectorized body.
2964   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
2965 
2966   // Save the state.
2967   LoopVectorPreHeader = Lp->getLoopPreheader();
2968   LoopScalarPreHeader = ScalarPH;
2969   LoopMiddleBlock = MiddleBlock;
2970   LoopExitBlock = ExitBlock;
2971   LoopVectorBody = VecBody;
2972   LoopScalarBody = OldBasicBlock;
2973 
2974   // Keep all loop hints from the original loop on the vector loop (we'll
2975   // replace the vectorizer-specific hints below).
2976   if (MDNode *LID = OrigLoop->getLoopID())
2977     Lp->setLoopID(LID);
2978 
2979   LoopVectorizeHints Hints(Lp, true, *ORE);
2980   Hints.setAlreadyVectorized();
2981 
2982   return LoopVectorPreHeader;
2983 }
2984 
2985 // Fix up external users of the induction variable. At this point, we are
2986 // in LCSSA form, with all external PHIs that use the IV having one input value,
2987 // coming from the remainder loop. We need those PHIs to also have a correct
2988 // value for the IV when arriving directly from the middle block.
2989 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
2990                                        const InductionDescriptor &II,
2991                                        Value *CountRoundDown, Value *EndValue,
2992                                        BasicBlock *MiddleBlock) {
2993   // There are two kinds of external IV usages - those that use the value
2994   // computed in the last iteration (the PHI) and those that use the penultimate
2995   // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
2997 
2998   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
2999 
3000   DenseMap<Value *, Value *> MissingVals;
3001 
3002   // An external user of the last iteration's value should see the value that
3003   // the remainder loop uses to initialize its own IV.
3004   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3005   for (User *U : PostInc->users()) {
3006     Instruction *UI = cast<Instruction>(U);
3007     if (!OrigLoop->contains(UI)) {
3008       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3009       MissingVals[UI] = EndValue;
3010     }
3011   }
3012 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
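  // Illustrative example (assumed values): for an IV with Start = 0 and
  // Step = 2, and a vector trip count CRD = 8, EndValue is 16; an external
  // user of the phi itself (the penultimate value) must instead see
  // 0 + 2 * (8 - 1) = 14 when control arrives from the middle block.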
3016   for (User *U : OrigPhi->users()) {
3017     auto *UI = cast<Instruction>(U);
3018     if (!OrigLoop->contains(UI)) {
3019       const DataLayout &DL =
3020           OrigLoop->getHeader()->getModule()->getDataLayout();
3021       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3022 
3023       IRBuilder<> B(MiddleBlock->getTerminator());
3024       Value *CountMinusOne = B.CreateSub(
3025           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3026       Value *CMO =
3027           !II.getStep()->getType()->isIntegerTy()
3028               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3029                              II.getStep()->getType())
3030               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3031       CMO->setName("cast.cmo");
3032       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3033       Escape->setName("ind.escape");
3034       MissingVals[UI] = Escape;
3035     }
3036   }
3037 
3038   for (auto &I : MissingVals) {
3039     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3041     // that is %IV2 = phi [...], [ %IV1, %latch ]
3042     // In this case, if IV1 has an external use, we need to avoid adding both
3043     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3044     // don't already have an incoming value for the middle block.
3045     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3046       PHI->addIncoming(I.second, MiddleBlock);
3047   }
3048 }
3049 
3050 namespace {
3051 
3052 struct CSEDenseMapInfo {
3053   static bool canHandle(const Instruction *I) {
3054     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3055            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3056   }
3057 
3058   static inline Instruction *getEmptyKey() {
3059     return DenseMapInfo<Instruction *>::getEmptyKey();
3060   }
3061 
3062   static inline Instruction *getTombstoneKey() {
3063     return DenseMapInfo<Instruction *>::getTombstoneKey();
3064   }
3065 
3066   static unsigned getHashValue(const Instruction *I) {
3067     assert(canHandle(I) && "Unknown instruction!");
3068     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3069                                                            I->value_op_end()));
3070   }
3071 
3072   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3073     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3074         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3075       return LHS == RHS;
3076     return LHS->isIdenticalTo(RHS);
3077   }
3078 };
3079 
3080 } // end anonymous namespace
3081 
/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3085   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3086   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3087     Instruction *In = &*I++;
3088 
3089     if (!CSEDenseMapInfo::canHandle(In))
3090       continue;
3091 
3092     // Check if we can replace this instruction with any of the
3093     // visited instructions.
3094     if (Instruction *V = CSEMap.lookup(In)) {
3095       In->replaceAllUsesWith(V);
3096       In->eraseFromParent();
3097       continue;
3098     }
3099 
3100     CSEMap[In] = In;
3101   }
3102 }
3103 
3104 /// Estimate the overhead of scalarizing an instruction. This is a
3105 /// convenience wrapper for the type-based getScalarizationOverhead API.
3106 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3107                                          const TargetTransformInfo &TTI) {
3108   if (VF == 1)
3109     return 0;
3110 
3111   unsigned Cost = 0;
3112   Type *RetTy = ToVectorTy(I->getType(), VF);
3113   if (!RetTy->isVoidTy() &&
3114       (!isa<LoadInst>(I) ||
3115        !TTI.supportsEfficientVectorElementLoadStore()))
3116     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3117 
3118   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3119     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3120     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3121   }
3122   else if (!isa<StoreInst>(I) ||
3123            !TTI.supportsEfficientVectorElementLoadStore()) {
3124     SmallVector<const Value *, 4> Operands(I->operand_values());
3125     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3126   }
3127 
3128   return Cost;
3129 }
3130 
3131 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3132 // Return the cost of the instruction, including scalarization overhead if it's
// needed. The flag NeedToScalarize shows whether the call needs to be
// scalarized, i.e. either a vector version isn't available or it is too
// expensive.
3135 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3136                                   const TargetTransformInfo &TTI,
3137                                   const TargetLibraryInfo *TLI,
3138                                   bool &NeedToScalarize) {
3139   Function *F = CI->getCalledFunction();
3140   StringRef FnName = CI->getCalledFunction()->getName();
3141   Type *ScalarRetTy = CI->getType();
3142   SmallVector<Type *, 4> Tys, ScalarTys;
3143   for (auto &ArgOp : CI->arg_operands())
3144     ScalarTys.push_back(ArgOp->getType());
3145 
3146   // Estimate cost of scalarized vector call. The source operands are assumed
3147   // to be vectors, so we need to extract individual elements from there,
3148   // execute VF scalar calls, and then gather the result into the vector return
3149   // value.
3150   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3151   if (VF == 1)
3152     return ScalarCallCost;
3153 
3154   // Compute corresponding vector type for return value and arguments.
3155   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3156   for (Type *ScalarTy : ScalarTys)
3157     Tys.push_back(ToVectorTy(ScalarTy, VF));
3158 
3159   // Compute costs of unpacking argument values for the scalar calls and
3160   // packing the return values to a vector.
3161   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3162 
3163   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
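  // Purely illustrative numbers (not from any real cost model): with
  // ScalarCallCost = 10, VF = 4 and ScalarizationCost = 6, the scalarized
  // cost is 10 * 4 + 6 = 46; a vector library call costing less than that is
  // preferred below and NeedToScalarize is cleared.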
3164 
3165   // If we can't emit a vector call for this function, then the currently found
3166   // cost is the cost we need to return.
3167   NeedToScalarize = true;
3168   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3169     return Cost;
3170 
3171   // If the corresponding vector cost is cheaper, return its cost.
3172   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3173   if (VectorCallCost < Cost) {
3174     NeedToScalarize = false;
3175     return VectorCallCost;
3176   }
3177   return Cost;
3178 }
3179 
3180 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3181 // factor VF.  Return the cost of the instruction, including scalarization
3182 // overhead if it's needed.
3183 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3184                                        const TargetTransformInfo &TTI,
3185                                        const TargetLibraryInfo *TLI) {
3186   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3187   assert(ID && "Expected intrinsic call!");
3188 
3189   FastMathFlags FMF;
3190   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3191     FMF = FPMO->getFastMathFlags();
3192 
3193   SmallVector<Value *, 4> Operands(CI->arg_operands());
3194   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3195 }
3196 
3197 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3198   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3199   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3200   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3201 }
3202 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3203   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3204   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3205   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3206 }
3207 
3208 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3209   // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
3211   // later and will remove any ext/trunc pairs.
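  //
  // As a sketch of the transformation (illustrative IR, assuming a value with
  // a minimal bit width of 8 and VF = 4), a <4 x i32> add becomes:
  //   %a8 = trunc <4 x i32> %a to <4 x i8>
  //   %b8 = trunc <4 x i32> %b to <4 x i8>
  //   %s8 = add <4 x i8> %a8, %b8
  //   %s  = zext <4 x i8> %s8 to <4 x i32>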
3212   SmallPtrSet<Value *, 4> Erased;
3213   for (const auto &KV : Cost->getMinimalBitwidths()) {
3214     // If the value wasn't vectorized, we must maintain the original scalar
3215     // type. The absence of the value from VectorLoopValueMap indicates that it
3216     // wasn't vectorized.
3217     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3218       continue;
3219     for (unsigned Part = 0; Part < UF; ++Part) {
3220       Value *I = getOrCreateVectorValue(KV.first, Part);
3221       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3222           !isa<Instruction>(I))
3223         continue;
3224       Type *OriginalTy = I->getType();
3225       Type *ScalarTruncatedTy =
3226           IntegerType::get(OriginalTy->getContext(), KV.second);
3227       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3228                                           OriginalTy->getVectorNumElements());
3229       if (TruncatedTy == OriginalTy)
3230         continue;
3231 
3232       IRBuilder<> B(cast<Instruction>(I));
3233       auto ShrinkOperand = [&](Value *V) -> Value * {
3234         if (auto *ZI = dyn_cast<ZExtInst>(V))
3235           if (ZI->getSrcTy() == TruncatedTy)
3236             return ZI->getOperand(0);
3237         return B.CreateZExtOrTrunc(V, TruncatedTy);
3238       };
3239 
3240       // The actual instruction modification depends on the instruction type,
3241       // unfortunately.
3242       Value *NewI = nullptr;
3243       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3244         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3245                              ShrinkOperand(BO->getOperand(1)));
3246 
3247         // Any wrapping introduced by shrinking this operation shouldn't be
3248         // considered undefined behavior. So, we can't unconditionally copy
3249         // arithmetic wrapping flags to NewI.
3250         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3251       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3252         NewI =
3253             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3254                          ShrinkOperand(CI->getOperand(1)));
3255       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3256         NewI = B.CreateSelect(SI->getCondition(),
3257                               ShrinkOperand(SI->getTrueValue()),
3258                               ShrinkOperand(SI->getFalseValue()));
3259       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3260         switch (CI->getOpcode()) {
3261         default:
3262           llvm_unreachable("Unhandled cast!");
3263         case Instruction::Trunc:
3264           NewI = ShrinkOperand(CI->getOperand(0));
3265           break;
3266         case Instruction::SExt:
3267           NewI = B.CreateSExtOrTrunc(
3268               CI->getOperand(0),
3269               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3270           break;
3271         case Instruction::ZExt:
3272           NewI = B.CreateZExtOrTrunc(
3273               CI->getOperand(0),
3274               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3275           break;
3276         }
3277       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3278         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3279         auto *O0 = B.CreateZExtOrTrunc(
3280             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3281         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3282         auto *O1 = B.CreateZExtOrTrunc(
3283             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3284 
3285         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3286       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3287         // Don't do anything with the operands, just extend the result.
3288         continue;
3289       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3290         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3291         auto *O0 = B.CreateZExtOrTrunc(
3292             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3293         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3294         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3295       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3296         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3297         auto *O0 = B.CreateZExtOrTrunc(
3298             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3299         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3300       } else {
3301         // If we don't know what to do, be conservative and don't do anything.
3302         continue;
3303       }
3304 
3305       // Lastly, extend the result.
3306       NewI->takeName(cast<Instruction>(I));
3307       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3308       I->replaceAllUsesWith(Res);
3309       cast<Instruction>(I)->eraseFromParent();
3310       Erased.insert(I);
3311       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3312     }
3313   }
3314 
  // We'll have created a bunch of ZExts that are now dead (unused). Clean up.
3316   for (const auto &KV : Cost->getMinimalBitwidths()) {
3317     // If the value wasn't vectorized, we must maintain the original scalar
3318     // type. The absence of the value from VectorLoopValueMap indicates that it
3319     // wasn't vectorized.
3320     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3321       continue;
3322     for (unsigned Part = 0; Part < UF; ++Part) {
3323       Value *I = getOrCreateVectorValue(KV.first, Part);
3324       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3325       if (Inst && Inst->use_empty()) {
3326         Value *NewI = Inst->getOperand(0);
3327         Inst->eraseFromParent();
3328         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3329       }
3330     }
3331   }
3332 }
3333 
3334 void InnerLoopVectorizer::fixVectorizedLoop() {
3335   // Insert truncates and extends for any truncated instructions as hints to
3336   // InstCombine.
3337   if (VF > 1)
3338     truncateToMinimalBitwidths();
3339 
3340   // At this point every instruction in the original loop is widened to a
3341   // vector form. Now we need to fix the recurrences in the loop. These PHI
3342   // nodes are currently empty because we did not want to introduce cycles.
3343   // This is the second stage of vectorizing recurrences.
3344   fixCrossIterationPHIs();
3345 
3346   // Update the dominator tree.
3347   //
3348   // FIXME: After creating the structure of the new loop, the dominator tree is
3349   //        no longer up-to-date, and it remains that way until we update it
3350   //        here. An out-of-date dominator tree is problematic for SCEV,
3351   //        because SCEVExpander uses it to guide code generation. The
  //        vectorizer uses SCEVExpander in several places. Instead, we should
3353   //        keep the dominator tree up-to-date as we go.
3354   updateAnalysis();
3355 
3356   // Fix-up external users of the induction variables.
3357   for (auto &Entry : *Legal->getInductionVars())
3358     fixupIVUsers(Entry.first, Entry.second,
3359                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3360                  IVEndValues[Entry.first], LoopMiddleBlock);
3361 
3362   fixLCSSAPHIs();
3363   for (Instruction *PI : PredicatedInstructions)
3364     sinkScalarOperands(&*PI);
3365 
3366   // Remove redundant induction instructions.
3367   cse(LoopVectorBody);
3368 }
3369 
3370 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3371   // In order to support recurrences we need to be able to vectorize Phi nodes.
3372   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3373   // stage #2: We now need to fix the recurrences by adding incoming edges to
3374   // the currently empty PHI nodes. At this point every instruction in the
3375   // original loop is widened to a vector form so we can use them to construct
3376   // the incoming edges.
3377   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3378     // Handle first-order recurrences and reductions that need to be fixed.
3379     if (Legal->isFirstOrderRecurrence(&Phi))
3380       fixFirstOrderRecurrence(&Phi);
3381     else if (Legal->isReductionVariable(&Phi))
3382       fixReduction(&Phi);
3383   }
3384 }
3385 
3386 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3387   // This is the second phase of vectorizing first-order recurrences. An
3388   // overview of the transformation is described below. Suppose we have the
3389   // following loop.
3390   //
3391   //   for (int i = 0; i < n; ++i)
3392   //     b[i] = a[i] - a[i - 1];
3393   //
3394   // There is a first-order recurrence on "a". For this loop, the shorthand
3395   // scalar IR looks like:
3396   //
3397   //   scalar.ph:
3398   //     s_init = a[-1]
3399   //     br scalar.body
3400   //
3401   //   scalar.body:
3402   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3403   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3404   //     s2 = a[i]
3405   //     b[i] = s2 - s1
3406   //     br cond, scalar.body, ...
3407   //
  // In this example, s1 is a recurrence because its value depends on the
3409   // previous iteration. In the first phase of vectorization, we created a
3410   // temporary value for s1. We now complete the vectorization and produce the
3411   // shorthand vector IR shown below (for VF = 4, UF = 1).
3412   //
3413   //   vector.ph:
3414   //     v_init = vector(..., ..., ..., a[-1])
3415   //     br vector.body
3416   //
3417   //   vector.body
3418   //     i = phi [0, vector.ph], [i+4, vector.body]
3419   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3420   //     v2 = a[i, i+1, i+2, i+3];
3421   //     v3 = vector(v1(3), v2(0, 1, 2))
3422   //     b[i, i+1, i+2, i+3] = v2 - v3
3423   //     br cond, vector.body, middle.block
3424   //
3425   //   middle.block:
3426   //     x = v2(3)
3427   //     br scalar.ph
3428   //
3429   //   scalar.ph:
3430   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3431   //     br scalar.body
3432   //
3433   // After execution completes the vector loop, we extract the next value of
3434   // the recurrence (x) to use as the initial value in the scalar loop.
3435 
3436   // Get the original loop preheader and single loop latch.
3437   auto *Preheader = OrigLoop->getLoopPreheader();
3438   auto *Latch = OrigLoop->getLoopLatch();
3439 
3440   // Get the initial and previous values of the scalar recurrence.
3441   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3442   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3443 
3444   // Create a vector from the initial value.
3445   auto *VectorInit = ScalarInit;
3446   if (VF > 1) {
3447     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3448     VectorInit = Builder.CreateInsertElement(
3449         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3450         Builder.getInt32(VF - 1), "vector.recur.init");
3451   }
3452 
3453   // We constructed a temporary phi node in the first phase of vectorization.
3454   // This phi node will eventually be deleted.
3455   Builder.SetInsertPoint(
3456       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3457 
3458   // Create a phi node for the new recurrence. The current value will either be
3459   // the initial value inserted into a vector or loop-varying vector value.
3460   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3461   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3462 
3463   // Get the vectorized previous value of the last part UF - 1. It appears last
3464   // among all unrolled iterations, due to the order of their construction.
3465   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3466 
3467   // Set the insertion point after the previous value if it is an instruction.
3468   // Note that the previous value may have been constant-folded so it is not
3469   // guaranteed to be an instruction in the vector loop. Also, if the previous
3470   // value is a phi node, we should insert after all the phi nodes to avoid
3471   // breaking basic block verification.
3472   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
3473       isa<PHINode>(PreviousLastPart))
3474     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3475   else
3476     Builder.SetInsertPoint(
3477         &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
3478 
3479   // We will construct a vector for the recurrence by combining the values for
3480   // the current and previous iterations. This is the required shuffle mask.
3481   SmallVector<Constant *, 8> ShuffleMask(VF);
3482   ShuffleMask[0] = Builder.getInt32(VF - 1);
3483   for (unsigned I = 1; I < VF; ++I)
3484     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
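  // For VF = 4 this mask is <3, 4, 5, 6>: the result takes the last element of
  // 'Incoming' followed by the first three elements of the other vector, e.g.
  // v3 = vector(v1(3), v2(0, 1, 2)) in the example above.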
3485 
3486   // The vector from which to take the initial value for the current iteration
3487   // (actual or unrolled). Initially, this is the vector phi node.
3488   Value *Incoming = VecPhi;
3489 
3490   // Shuffle the current and previous vector and update the vector parts.
3491   for (unsigned Part = 0; Part < UF; ++Part) {
3492     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3493     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3494     auto *Shuffle =
3495         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3496                                              ConstantVector::get(ShuffleMask))
3497                : Incoming;
3498     PhiPart->replaceAllUsesWith(Shuffle);
3499     cast<Instruction>(PhiPart)->eraseFromParent();
3500     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3501     Incoming = PreviousPart;
3502   }
3503 
3504   // Fix the latch value of the new recurrence in the vector loop.
3505   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3506 
3507   // Extract the last vector element in the middle block. This will be the
3508   // initial value for the recurrence when jumping to the scalar loop.
3509   auto *ExtractForScalar = Incoming;
3510   if (VF > 1) {
3511     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3512     ExtractForScalar = Builder.CreateExtractElement(
3513         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3514   }
  // Extract the second-to-last element in the middle block if the
3516   // Phi is used outside the loop. We need to extract the phi itself
3517   // and not the last element (the phi update in the current iteration). This
3518   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3519   // when the scalar loop is not run at all.
3520   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3521   if (VF > 1)
3522     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3523         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
3528   else if (UF > 1)
3529     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3530 
3531   // Fix the initial value of the original recurrence in the scalar loop.
3532   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3533   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3534   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3535     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3536     Start->addIncoming(Incoming, BB);
3537   }
3538 
3539   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3540   Phi->setName("scalar.recur");
3541 
3542   // Finally, fix users of the recurrence outside the loop. The users will need
3543   // either the last value of the scalar recurrence or the last value of the
3544   // vector recurrence we extracted in the middle block. Since the loop is in
3545   // LCSSA form, we just need to find all the phi nodes for the original scalar
3546   // recurrence in the exit block, and then add an edge for the middle block.
3547   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3548     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3549       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3550     }
3551   }
3552 }
3553 
3554 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3555   Constant *Zero = Builder.getInt32(0);
3556 
  // Get its reduction variable descriptor.
3558   assert(Legal->isReductionVariable(Phi) &&
3559          "Unable to find the reduction variable");
3560   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3561 
3562   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3563   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3564   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3565   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3566     RdxDesc.getMinMaxRecurrenceKind();
3567   setDebugLocFromInst(Builder, ReductionStartValue);
3568 
3569   // We need to generate a reduction vector from the incoming scalar.
3570   // To do so, we need to generate the 'identity' vector and override
3571   // one of the elements with the incoming scalar reduction. We need
3572   // to do it in the vector-loop preheader.
3573   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3574 
3575   // This is the vector-clone of the value that leaves the loop.
3576   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3577 
  // Find the reduction identity value: zero for addition, or and xor; one for
  // multiplication; -1 (all ones) for and.
3580   Value *Identity;
3581   Value *VectorStart;
3582   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3583       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
3585     if (VF == 1) {
3586       VectorStart = Identity = ReductionStartValue;
3587     } else {
3588       VectorStart = Identity =
3589         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3590     }
3591   } else {
3592     // Handle other reduction kinds:
3593     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3594         RK, VecTy->getScalarType());
3595     if (VF == 1) {
3596       Identity = Iden;
3597       // This vector is the Identity vector where the first element is the
3598       // incoming scalar reduction.
3599       VectorStart = ReductionStartValue;
3600     } else {
3601       Identity = ConstantVector::getSplat(VF, Iden);
3602 
3603       // This vector is the Identity vector where the first element is the
3604       // incoming scalar reduction.
3605       VectorStart =
3606         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3607     }
3608   }
3609 
3610   // Fix the vector-loop phi.
3611 
3612   // Reductions do not have to start at zero. They can start with
3613   // any loop invariant values.
3614   BasicBlock *Latch = OrigLoop->getLoopLatch();
3615   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3616   for (unsigned Part = 0; Part < UF; ++Part) {
3617     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3618     Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
3620     // first unroll part.
3621     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3622     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3623     cast<PHINode>(VecRdxPhi)
3624       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3625   }
3626 
3627   // Before each round, move the insertion point right between
3628   // the PHIs and the values we are going to write.
3629   // This allows us to write both PHINodes and the extractelement
3630   // instructions.
3631   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3632 
3633   setDebugLocFromInst(Builder, LoopExitInst);
3634 
3635   // If the vector reduction can be performed in a smaller type, we truncate
3636   // then extend the loop exit value to enable InstCombine to evaluate the
3637   // entire expression in the smaller type.
3638   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3639     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3640     Builder.SetInsertPoint(
3641         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3642     VectorParts RdxParts(UF);
3643     for (unsigned Part = 0; Part < UF; ++Part) {
3644       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3645       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3646       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3647                                         : Builder.CreateZExt(Trunc, VecTy);
3648       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3649            UI != RdxParts[Part]->user_end();)
3650         if (*UI != Trunc) {
3651           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3652           RdxParts[Part] = Extnd;
3653         } else {
3654           ++UI;
3655         }
3656     }
3657     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3658     for (unsigned Part = 0; Part < UF; ++Part) {
3659       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3660       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3661     }
3662   }
3663 
3664   // Reduce all of the unrolled parts into a single vector.
3665   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3666   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3667   setDebugLocFromInst(Builder, ReducedPartRdx);
3668   for (unsigned Part = 1; Part < UF; ++Part) {
3669     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3670     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3671       // Floating point operations had to be 'fast' to enable the reduction.
3672       ReducedPartRdx = addFastMathFlag(
3673           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3674                               ReducedPartRdx, "bin.rdx"));
3675     else
3676       ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
3677           Builder, MinMaxKind, ReducedPartRdx, RdxPart);
3678   }
3679 
3680   if (VF > 1) {
3681     bool NoNaN = Legal->hasFunNoNaNAttr();
3682     ReducedPartRdx =
3683         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3684     // If the reduction can be performed in a smaller type, we need to extend
3685     // the reduction to the wider type before we branch to the original loop.
3686     if (Phi->getType() != RdxDesc.getRecurrenceType())
3687       ReducedPartRdx =
3688         RdxDesc.isSigned()
3689         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3690         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3691   }
3692 
3693   // Create a phi node that merges control-flow from the backedge-taken check
3694   // block and the middle block.
3695   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3696                                         LoopScalarPreHeader->getTerminator());
3697   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3698     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3699   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3700 
3701   // Now, we need to fix the users of the reduction variable
3702   // inside and outside of the scalar remainder loop.
3703   // We know that the loop is in LCSSA form. We need to update the
3704   // PHI nodes in the exit blocks.
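  // E.g., an exit-block phi such as
  //   %sum.lcssa = phi i32 [ %sum.next, %loop.latch ]
  // receives the reduced vector result as a second incoming value from the
  // middle block below.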
3705   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3706     // All PHINodes need to have a single entry edge, or two if
3707     // we already fixed them.
3708     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3709 
3710     // We found a reduction value exit-PHI. Update it with the
3711     // incoming bypass edge.
3712     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3713       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3714   } // end of the LCSSA phi scan.
3715 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
3718   int IncomingEdgeBlockIdx =
3719     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3720   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3721   // Pick the other block.
3722   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3723   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3724   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3725 }
3726 
3727 void InnerLoopVectorizer::fixLCSSAPHIs() {
3728   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3729     if (LCSSAPhi.getNumIncomingValues() == 1) {
3730       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
      // A non-instruction incoming value has only a single (scalar) value, so
      // lane zero suffices.
3732       unsigned LastLane = 0;
3733       if (isa<Instruction>(IncomingValue))
3734           LastLane = Cost->isUniformAfterVectorization(
3735                          cast<Instruction>(IncomingValue), VF)
3736                          ? 0
3737                          : VF - 1;
3738       // Can be a loop invariant incoming value or the last scalar value to be
3739       // extracted from the vectorized loop.
3740       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3741       Value *lastIncomingValue =
3742           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3743       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3744     }
3745   }
3746 }
3747 
3748 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3749   // The basic block and loop containing the predicated instruction.
3750   auto *PredBB = PredInst->getParent();
3751   auto *VectorLoop = LI->getLoopFor(PredBB);
3752 
3753   // Initialize a worklist with the operands of the predicated instruction.
3754   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3755 
3756   // Holds instructions that we need to analyze again. An instruction may be
3757   // reanalyzed if we don't yet know if we can sink it or not.
3758   SmallVector<Instruction *, 8> InstsToReanalyze;
3759 
3760   // Returns true if a given use occurs in the predicated block. Phi nodes use
3761   // their operands in their corresponding predecessor blocks.
3762   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3763     auto *I = cast<Instruction>(U.getUser());
3764     BasicBlock *BB = I->getParent();
3765     if (auto *Phi = dyn_cast<PHINode>(I))
3766       BB = Phi->getIncomingBlock(
3767           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3768     return BB == PredBB;
3769   };
3770 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass through the worklist sinks no instructions.
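  // E.g., when a udiv is predicated and scalarized into its own block, the
  // extractelement instructions created for its operands are typically used
  // only by that udiv, so they can be sunk into the predicated block; once
  // moved, their own operands may become sinkable as well, hence the
  // fixed-point iteration below.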
3775   bool Changed;
3776   do {
3777     // Add the instructions that need to be reanalyzed to the worklist, and
3778     // reset the changed indicator.
3779     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
3780     InstsToReanalyze.clear();
3781     Changed = false;
3782 
3783     while (!Worklist.empty()) {
3784       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
3785 
3786       // We can't sink an instruction if it is a phi node, is already in the
3787       // predicated block, is not in the loop, or may have side effects.
3788       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
3789           !VectorLoop->contains(I) || I->mayHaveSideEffects())
3790         continue;
3791 
3792       // It's legal to sink the instruction if all its uses occur in the
3793       // predicated block. Otherwise, there's nothing to do yet, and we may
3794       // need to reanalyze the instruction.
3795       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
3796         InstsToReanalyze.push_back(I);
3797         continue;
3798       }
3799 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
3802       I->moveBefore(&*PredBB->getFirstInsertionPt());
3803       Worklist.insert(I->op_begin(), I->op_end());
3804 
3805       // The sinking may have enabled other instructions to be sunk, so we will
3806       // need to iterate.
3807       Changed = true;
3808     }
3809   } while (Changed);
3810 }
3811 
3812 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
3813                                               unsigned VF) {
3814   assert(PN->getParent() == OrigLoop->getHeader() &&
3815          "Non-header phis should have been handled elsewhere");
3816 
3817   PHINode *P = cast<PHINode>(PN);
3818   // In order to support recurrences we need to be able to vectorize Phi nodes.
3819   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3820   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
3821   // this value when we vectorize all of the instructions that use the PHI.
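  // E.g., for a reduction phi
  //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
  // we emit one empty <VF x i32> "vec.phi" per unroll part here; its incoming
  // values are filled in later, once all the instructions feeding it have
  // been vectorized (see the reduction fix-up above).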
3822   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
3823     for (unsigned Part = 0; Part < UF; ++Part) {
3824       // This is phase one of vectorizing PHIs.
3825       Type *VecTy =
3826           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
3827       Value *EntryPart = PHINode::Create(
3828           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
3829       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
3830     }
3831     return;
3832   }
3833 
3834   setDebugLocFromInst(Builder, P);
3835 
3836   // This PHINode must be an induction variable.
3837   // Make sure that we know about it.
3838   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
3839 
3840   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
3841   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3842 
3843   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
3844   // which can be found from the original scalar operations.
3845   switch (II.getKind()) {
3846   case InductionDescriptor::IK_NoInduction:
3847     llvm_unreachable("Unknown induction");
3848   case InductionDescriptor::IK_IntInduction:
3849   case InductionDescriptor::IK_FpInduction:
3850     llvm_unreachable("Integer/fp induction is handled elsewhere.");
3851   case InductionDescriptor::IK_PtrInduction: {
3852     // Handle the pointer induction variable case.
3853     assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized induction variable that starts counting at zero.
3855     Value *PtrInd = Induction;
3856     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
3857     // Determine the number of scalars we need to generate for each unroll
3858     // iteration. If the instruction is uniform, we only need to generate the
3859     // first lane. Otherwise, we generate all VF values.
3860     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
3861     // These are the scalar results. Notice that we don't generate vector GEPs
3862     // because scalar GEPs result in better code.
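    // E.g., with VF = 4 and UF = 2, a pointer induction produces the scalar
    // addresses p+0 .. p+7 via eight scalar GEPs (or just p+0 and p+4 if the
    // pointer is uniform after vectorization), rather than two
    // vector-of-pointer GEPs.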
3863     for (unsigned Part = 0; Part < UF; ++Part) {
3864       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3865         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
3866         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
3867         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
3868         SclrGep->setName("next.gep");
3869         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
3870       }
3871     }
3872     return;
3873   }
3874   }
3875 }
3876 
3877 /// A helper function for checking whether an integer division-related
3878 /// instruction may divide by zero (in which case it must be predicated if
3879 /// executed conditionally in the scalar code).
3880 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
3882 /// converted into multiplication, so we will still end up scalarizing
3883 /// the division, but can do so w/o predication.
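/// E.g., in
///   for (i = 0; i < n; ++i)
///     if (c[i])
///       x[i] = a[i] / b[i];
/// the division only executes when c[i] is true. Unless b[i] is a known
/// non-zero constant, the vectorized division must remain predicated so that
/// lanes with c[i] false cannot trap on a zero divisor.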
3884 static bool mayDivideByZero(Instruction &I) {
3885   assert((I.getOpcode() == Instruction::UDiv ||
3886           I.getOpcode() == Instruction::SDiv ||
3887           I.getOpcode() == Instruction::URem ||
3888           I.getOpcode() == Instruction::SRem) &&
3889          "Unexpected instruction");
3890   Value *Divisor = I.getOperand(1);
3891   auto *CInt = dyn_cast<ConstantInt>(Divisor);
3892   return !CInt || CInt->isZero();
3893 }
3894 
3895 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
3896   switch (I.getOpcode()) {
3897   case Instruction::Br:
3898   case Instruction::PHI:
3899     llvm_unreachable("This instruction is handled by a different recipe.");
3900   case Instruction::GetElementPtr: {
3901     // Construct a vector GEP by widening the operands of the scalar GEP as
3902     // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
3903     // results in a vector of pointers when at least one operand of the GEP
3904     // is vector-typed. Thus, to keep the representation compact, we only use
3905     // vector-typed operands for loop-varying values.
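    // E.g., for VF = 4, a scalar GEP
    //   %p = getelementptr inbounds i32, i32* %base, i64 %iv
    // with a loop-invariant %base and a loop-varying index %iv becomes a GEP
    // whose index operand is a <4 x i64> vector, yielding a <4 x i32*> vector
    // of pointers.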
3906     auto *GEP = cast<GetElementPtrInst>(&I);
3907 
3908     if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
3909       // If we are vectorizing, but the GEP has only loop-invariant operands,
3910       // the GEP we build (by only using vector-typed operands for
3911       // loop-varying values) would be a scalar pointer. Thus, to ensure we
3912       // produce a vector of pointers, we need to either arbitrarily pick an
3913       // operand to broadcast, or broadcast a clone of the original GEP.
3914       // Here, we broadcast a clone of the original.
3915       //
3916       // TODO: If at some point we decide to scalarize instructions having
3917       //       loop-invariant operands, this special case will no longer be
3918       //       required. We would add the scalarization decision to
3919       //       collectLoopScalars() and teach getVectorValue() to broadcast
3920       //       the lane-zero scalar value.
3921       auto *Clone = Builder.Insert(GEP->clone());
3922       for (unsigned Part = 0; Part < UF; ++Part) {
3923         Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
3924         VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
3925         addMetadata(EntryPart, GEP);
3926       }
3927     } else {
3928       // If the GEP has at least one loop-varying operand, we are sure to
3929       // produce a vector of pointers. But if we are only unrolling, we want
3930       // to produce a scalar GEP for each unroll part. Thus, the GEP we
3931       // produce with the code below will be scalar (if VF == 1) or vector
      // (otherwise). Note that for the unroll-only case, we still maintain
      // values in the vector value map (via setVectorValue), as we do for
      // other instructions.
3935       for (unsigned Part = 0; Part < UF; ++Part) {
3936         // The pointer operand of the new GEP. If it's loop-invariant, we
3937         // won't broadcast it.
3938         auto *Ptr =
3939             OrigLoop->isLoopInvariant(GEP->getPointerOperand())
3940                 ? GEP->getPointerOperand()
3941                 : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
3942 
3943         // Collect all the indices for the new GEP. If any index is
3944         // loop-invariant, we won't broadcast it.
3945         SmallVector<Value *, 4> Indices;
3946         for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
3947           if (OrigLoop->isLoopInvariant(U.get()))
3948             Indices.push_back(U.get());
3949           else
3950             Indices.push_back(getOrCreateVectorValue(U.get(), Part));
3951         }
3952 
3953         // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
3954         // but it should be a vector, otherwise.
3955         auto *NewGEP = GEP->isInBounds()
3956                            ? Builder.CreateInBoundsGEP(Ptr, Indices)
3957                            : Builder.CreateGEP(Ptr, Indices);
3958         assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
3959                "NewGEP is not a pointer vector");
3960         VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
3961         addMetadata(NewGEP, GEP);
3962       }
3963     }
3964 
3965     break;
3966   }
3967   case Instruction::UDiv:
3968   case Instruction::SDiv:
3969   case Instruction::SRem:
3970   case Instruction::URem:
3971   case Instruction::Add:
3972   case Instruction::FAdd:
3973   case Instruction::Sub:
3974   case Instruction::FSub:
3975   case Instruction::Mul:
3976   case Instruction::FMul:
3977   case Instruction::FDiv:
3978   case Instruction::FRem:
3979   case Instruction::Shl:
3980   case Instruction::LShr:
3981   case Instruction::AShr:
3982   case Instruction::And:
3983   case Instruction::Or:
3984   case Instruction::Xor: {
3985     // Just widen binops.
3986     auto *BinOp = cast<BinaryOperator>(&I);
3987     setDebugLocFromInst(Builder, BinOp);
3988 
3989     for (unsigned Part = 0; Part < UF; ++Part) {
3990       Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
3991       Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
3992       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
3993 
3994       if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
3995         VecOp->copyIRFlags(BinOp);
3996 
3997       // Use this vector value for all users of the original instruction.
3998       VectorLoopValueMap.setVectorValue(&I, Part, V);
3999       addMetadata(V, BinOp);
4000     }
4001 
4002     break;
4003   }
4004   case Instruction::Select: {
4005     // Widen selects.
4006     // If the selector is loop invariant we can create a select
4007     // instruction with a scalar condition. Otherwise, use vector-select.
4008     auto *SE = PSE.getSE();
4009     bool InvariantCond =
4010         SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4011     setDebugLocFromInst(Builder, &I);
4012 
    // The condition can be loop invariant but still defined inside the
    // loop. This means that we can't just use the original 'cond' value.
    // We have to take the 'vectorized' value and pick the first lane.
    // InstCombine will make this a no-op.
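    // E.g., for 'v = c ? a[i] : b[i]' where 'c' is computed inside the loop
    // but is loop invariant, we use lane 0 of the vectorized condition as the
    // scalar condition and select between the two vector operands.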
4017 
4018     auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4019 
4020     for (unsigned Part = 0; Part < UF; ++Part) {
4021       Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4022       Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4023       Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4024       Value *Sel =
4025           Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4026       VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4027       addMetadata(Sel, &I);
4028     }
4029 
4030     break;
4031   }
4032 
4033   case Instruction::ICmp:
4034   case Instruction::FCmp: {
4035     // Widen compares. Generate vector compares.
4036     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4037     auto *Cmp = dyn_cast<CmpInst>(&I);
4038     setDebugLocFromInst(Builder, Cmp);
4039     for (unsigned Part = 0; Part < UF; ++Part) {
4040       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4041       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4042       Value *C = nullptr;
4043       if (FCmp) {
4044         // Propagate fast math flags.
4045         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4046         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4047         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4048       } else {
4049         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4050       }
4051       VectorLoopValueMap.setVectorValue(&I, Part, C);
4052       addMetadata(C, &I);
4053     }
4054 
4055     break;
4056   }
4057 
4058   case Instruction::ZExt:
4059   case Instruction::SExt:
4060   case Instruction::FPToUI:
4061   case Instruction::FPToSI:
4062   case Instruction::FPExt:
4063   case Instruction::PtrToInt:
4064   case Instruction::IntToPtr:
4065   case Instruction::SIToFP:
4066   case Instruction::UIToFP:
4067   case Instruction::Trunc:
4068   case Instruction::FPTrunc:
4069   case Instruction::BitCast: {
4070     auto *CI = dyn_cast<CastInst>(&I);
4071     setDebugLocFromInst(Builder, CI);
4072 
    // Vectorize casts.
4074     Type *DestTy =
4075         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4076 
4077     for (unsigned Part = 0; Part < UF; ++Part) {
4078       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4079       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4080       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4081       addMetadata(Cast, &I);
4082     }
4083     break;
4084   }
4085 
4086   case Instruction::Call: {
4087     // Ignore dbg intrinsics.
4088     if (isa<DbgInfoIntrinsic>(I))
4089       break;
4090     setDebugLocFromInst(Builder, &I);
4091 
4092     Module *M = I.getParent()->getParent()->getParent();
4093     auto *CI = cast<CallInst>(&I);
4094 
4095     StringRef FnName = CI->getCalledFunction()->getName();
4096     Function *F = CI->getCalledFunction();
4097     Type *RetTy = ToVectorTy(CI->getType(), VF);
4098     SmallVector<Type *, 4> Tys;
4099     for (Value *ArgOperand : CI->arg_operands())
4100       Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4101 
4102     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4103 
    // The flag indicates whether we widen the call using a vector intrinsic
    // or a vectorized library call, depending on which of the two is cheaper
    // at this VF.
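    // E.g., a call to @llvm.sqrt.f32 can be widened to @llvm.sqrt.v4f32,
    // whereas a call to 'expf' can only be widened if the target library info
    // provides a vectorized counterpart (e.g., a vector math library routine
    // such as one from SVML).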
4107     bool NeedToScalarize;
4108     unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4109     bool UseVectorIntrinsic =
4110         ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4111     assert((UseVectorIntrinsic || !NeedToScalarize) &&
4112            "Instruction should be scalarized elsewhere.");
4113 
4114     for (unsigned Part = 0; Part < UF; ++Part) {
4115       SmallVector<Value *, 4> Args;
4116       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4117         Value *Arg = CI->getArgOperand(i);
4118         // Some intrinsics have a scalar argument - don't replace it with a
4119         // vector.
4120         if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
4121           Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
4122         Args.push_back(Arg);
4123       }
4124 
4125       Function *VectorF;
4126       if (UseVectorIntrinsic) {
4127         // Use vector version of the intrinsic.
4128         Type *TysForDecl[] = {CI->getType()};
4129         if (VF > 1)
4130           TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4131         VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4132       } else {
4133         // Use vector version of the library call.
4134         StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4135         assert(!VFnName.empty() && "Vector function name is empty.");
4136         VectorF = M->getFunction(VFnName);
4137         if (!VectorF) {
4138           // Generate a declaration
4139           FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4140           VectorF =
4141               Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4142           VectorF->copyAttributesFrom(F);
4143         }
4144       }
4145       assert(VectorF && "Can't create vector function.");
4146 
4147       SmallVector<OperandBundleDef, 1> OpBundles;
4148       CI->getOperandBundlesAsDefs(OpBundles);
4149       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4150 
4151       if (isa<FPMathOperator>(V))
4152         V->copyFastMathFlags(CI);
4153 
4154       VectorLoopValueMap.setVectorValue(&I, Part, V);
4155       addMetadata(V, &I);
4156     }
4157 
4158     break;
4159   }
4160 
4161   default:
4162     // This instruction is not vectorized by simple widening.
4163     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4164     llvm_unreachable("Unhandled instruction!");
4165   } // end of switch.
4166 }
4167 
4168 void InnerLoopVectorizer::updateAnalysis() {
  // Have SCEV forget its cached information for the original loop.
4170   PSE.getSE()->forgetLoop(OrigLoop);
4171 
4172   // Update the dominator tree information.
4173   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4174          "Entry does not dominate exit.");
4175 
4176   DT->addNewBlock(LoopMiddleBlock,
4177                   LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4178   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4179   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4180   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4181   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
4182 }
4183 
4184 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4185   // We should not collect Scalars more than once per VF. Right now, this
4186   // function is called from collectUniformsAndScalars(), which already does
4187   // this check. Collecting Scalars for VF=1 does not make any sense.
4188   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4189          "This function should not be visited twice for the same VF");
4190 
4191   SmallSetVector<Instruction *, 8> Worklist;
4192 
4193   // These sets are used to seed the analysis with pointers used by memory
4194   // accesses that will remain scalar.
4195   SmallSetVector<Instruction *, 8> ScalarPtrs;
4196   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4197 
4198   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4199   // The pointer operands of loads and stores will be scalar as long as the
4200   // memory access is not a gather or scatter operation. The value operand of a
4201   // store will remain scalar if the store is scalarized.
4202   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4203     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4204     assert(WideningDecision != CM_Unknown &&
4205            "Widening decision should be ready at this moment");
4206     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4207       if (Ptr == Store->getValueOperand())
4208         return WideningDecision == CM_Scalarize;
4209     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4210            "Ptr is neither a value or pointer operand");
4211     return WideningDecision != CM_GatherScatter;
4212   };
4213 
4214   // A helper that returns true if the given value is a bitcast or
4215   // getelementptr instruction contained in the loop.
4216   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4217     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4218             isa<GetElementPtrInst>(V)) &&
4219            !TheLoop->isLoopInvariant(V);
4220   };
4221 
4222   // A helper that evaluates a memory access's use of a pointer. If the use
4223   // will be a scalar use, and the pointer is only used by memory accesses, we
4224   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4225   // PossibleNonScalarPtrs.
4226   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4227     // We only care about bitcast and getelementptr instructions contained in
4228     // the loop.
4229     if (!isLoopVaryingBitCastOrGEP(Ptr))
4230       return;
4231 
4232     // If the pointer has already been identified as scalar (e.g., if it was
4233     // also identified as uniform), there's nothing to do.
4234     auto *I = cast<Instruction>(Ptr);
4235     if (Worklist.count(I))
4236       return;
4237 
4238     // If the use of the pointer will be a scalar use, and all users of the
4239     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4240     // place the pointer in PossibleNonScalarPtrs.
4241     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4242           return isa<LoadInst>(U) || isa<StoreInst>(U);
4243         }))
4244       ScalarPtrs.insert(I);
4245     else
4246       PossibleNonScalarPtrs.insert(I);
4247   };
4248 
4249   // We seed the scalars analysis with three classes of instructions: (1)
4250   // instructions marked uniform-after-vectorization, (2) bitcast and
4251   // getelementptr instructions used by memory accesses requiring a scalar use,
4252   // and (3) pointer induction variables and their update instructions (we
4253   // currently only scalarize these).
4254   //
4255   // (1) Add to the worklist all instructions that have been identified as
4256   // uniform-after-vectorization.
4257   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4258 
4259   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4260   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4262   // scatter operation. The value operand of a store will remain scalar if the
4263   // store is scalarized.
4264   for (auto *BB : TheLoop->blocks())
4265     for (auto &I : *BB) {
4266       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4267         evaluatePtrUse(Load, Load->getPointerOperand());
4268       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4269         evaluatePtrUse(Store, Store->getPointerOperand());
4270         evaluatePtrUse(Store, Store->getValueOperand());
4271       }
4272     }
4273   for (auto *I : ScalarPtrs)
4274     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4275       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4276       Worklist.insert(I);
4277     }
4278 
4279   // (3) Add to the worklist all pointer induction variables and their update
4280   // instructions.
4281   //
4282   // TODO: Once we are able to vectorize pointer induction variables we should
4283   //       no longer insert them into the worklist here.
4284   auto *Latch = TheLoop->getLoopLatch();
4285   for (auto &Induction : *Legal->getInductionVars()) {
4286     auto *Ind = Induction.first;
4287     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4288     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4289       continue;
4290     Worklist.insert(Ind);
4291     Worklist.insert(IndUpdate);
4292     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4293     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4294                       << "\n");
4295   }
4296 
4297   // Insert the forced scalars.
4298   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4299   // induction variable when the PHI user is scalarized.
4300   auto ForcedScalar = ForcedScalars.find(VF);
4301   if (ForcedScalar != ForcedScalars.end())
4302     for (auto *I : ForcedScalar->second)
4303       Worklist.insert(I);
4304 
4305   // Expand the worklist by looking through any bitcasts and getelementptr
4306   // instructions we've already identified as scalar. This is similar to the
4307   // expansion step in collectLoopUniforms(); however, here we're only
4308   // expanding to include additional bitcasts and getelementptr instructions.
4309   unsigned Idx = 0;
4310   while (Idx != Worklist.size()) {
4311     Instruction *Dst = Worklist[Idx++];
4312     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4313       continue;
4314     auto *Src = cast<Instruction>(Dst->getOperand(0));
4315     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4316           auto *J = cast<Instruction>(U);
4317           return !TheLoop->contains(J) || Worklist.count(J) ||
4318                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4319                   isScalarUse(J, Src));
4320         })) {
4321       Worklist.insert(Src);
4322       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4323     }
4324   }
4325 
4326   // An induction variable will remain scalar if all users of the induction
4327   // variable and induction variable update remain scalar.
4328   for (auto &Induction : *Legal->getInductionVars()) {
4329     auto *Ind = Induction.first;
4330     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4331 
4332     // We already considered pointer induction variables, so there's no reason
4333     // to look at their users again.
4334     //
4335     // TODO: Once we are able to vectorize pointer induction variables we
4336     //       should no longer skip over them here.
4337     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4338       continue;
4339 
4340     // Determine if all users of the induction variable are scalar after
4341     // vectorization.
4342     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4343       auto *I = cast<Instruction>(U);
4344       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4345     });
4346     if (!ScalarInd)
4347       continue;
4348 
4349     // Determine if all users of the induction variable update instruction are
4350     // scalar after vectorization.
4351     auto ScalarIndUpdate =
4352         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4353           auto *I = cast<Instruction>(U);
4354           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4355         });
4356     if (!ScalarIndUpdate)
4357       continue;
4358 
4359     // The induction variable and its update instruction will remain scalar.
4360     Worklist.insert(Ind);
4361     Worklist.insert(IndUpdate);
4362     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4363     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4364                       << "\n");
4365   }
4366 
4367   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4368 }
4369 
4370 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) {
4371   if (!Legal->blockNeedsPredication(I->getParent()))
4372     return false;
4373   switch(I->getOpcode()) {
4374   default:
4375     break;
4376   case Instruction::Load:
4377   case Instruction::Store: {
4378     if (!Legal->isMaskRequired(I))
4379       return false;
4380     auto *Ptr = getLoadStorePointerOperand(I);
4381     auto *Ty = getMemInstValueType(I);
4382     return isa<LoadInst>(I) ?
4383         !(isLegalMaskedLoad(Ty, Ptr)  || isLegalMaskedGather(Ty))
4384       : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
4385   }
4386   case Instruction::UDiv:
4387   case Instruction::SDiv:
4388   case Instruction::SRem:
4389   case Instruction::URem:
4390     return mayDivideByZero(*I);
4391   }
4392   return false;
4393 }
4394 
4395 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4396                                                                unsigned VF) {
4397   // Get and ensure we have a valid memory instruction.
4398   LoadInst *LI = dyn_cast<LoadInst>(I);
4399   StoreInst *SI = dyn_cast<StoreInst>(I);
4400   assert((LI || SI) && "Invalid memory instruction");
4401 
4402   auto *Ptr = getLoadStorePointerOperand(I);
4403 
  // First of all, in order to be widened, the pointer must be consecutive.
4405   if (!Legal->isConsecutivePtr(Ptr))
4406     return false;
4407 
  // If the instruction is a load or store located in a predicated block that
  // must be scalarized due to predication, it cannot be widened.
4410   if (isScalarWithPredication(I))
4411     return false;
4412 
  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
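  // E.g., on typical targets an i24 has a 24-bit type size but a 32-bit
  // allocated size, so a consecutive run of i24 accesses cannot simply be
  // widened into a <VF x i24> load or store.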
4415   auto &DL = I->getModule()->getDataLayout();
4416   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4417   if (hasIrregularType(ScalarTy, DL, VF))
4418     return false;
4419 
4420   return true;
4421 }
4422 
4423 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4424   // We should not collect Uniforms more than once per VF. Right now,
4425   // this function is called from collectUniformsAndScalars(), which
4426   // already does this check. Collecting Uniforms for VF=1 does not make any
4427   // sense.
4428 
4429   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4430          "This function should not be visited twice for the same VF");
4431 
  // Initialize the entry for this VF so that even if we find no uniform
  // values, we will not analyze it again; Uniforms.count(VF) will return 1.
4434   Uniforms[VF].clear();
4435 
4436   // We now know that the loop is vectorizable!
4437   // Collect instructions inside the loop that will remain uniform after
4438   // vectorization.
4439 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
4442   auto isOutOfScope = [&](Value *V) -> bool {
4443     Instruction *I = dyn_cast<Instruction>(V);
4444     return (!I || !TheLoop->contains(I));
4445   };
4446 
4447   SetVector<Instruction *> Worklist;
4448   BasicBlock *Latch = TheLoop->getLoopLatch();
4449 
4450   // Start with the conditional branch. If the branch condition is an
4451   // instruction contained in the loop that is only used by the branch, it is
4452   // uniform.
4453   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4454   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
4455     Worklist.insert(Cmp);
4456     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
4457   }
4458 
4459   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4460   // are pointers that are treated like consecutive pointers during
4461   // vectorization. The pointer operands of interleaved accesses are an
4462   // example.
4463   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4464 
4465   // Holds pointer operands of instructions that are possibly non-uniform.
4466   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4467 
4468   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4469     InstWidening WideningDecision = getWideningDecision(I, VF);
4470     assert(WideningDecision != CM_Unknown &&
4471            "Widening decision should be ready at this moment");
4472 
4473     return (WideningDecision == CM_Widen ||
4474             WideningDecision == CM_Widen_Reverse ||
4475             WideningDecision == CM_Interleave);
4476   };
4477   // Iterate over the instructions in the loop, and collect all
4478   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4479   // that a consecutive-like pointer operand will be scalarized, we collect it
4480   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4481   // getelementptr instruction can be used by both vectorized and scalarized
4482   // memory instructions. For example, if a loop loads and stores from the same
4483   // location, but the store is conditional, the store will be scalarized, and
4484   // the getelementptr won't remain uniform.
4485   for (auto *BB : TheLoop->blocks())
4486     for (auto &I : *BB) {
4487       // If there's no pointer operand, there's nothing to do.
4488       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4489       if (!Ptr)
4490         continue;
4491 
4492       // True if all users of Ptr are memory accesses that have Ptr as their
4493       // pointer operand.
4494       auto UsersAreMemAccesses =
4495           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4496             return getLoadStorePointerOperand(U) == Ptr;
4497           });
4498 
4499       // Ensure the memory instruction will not be scalarized or used by
4500       // gather/scatter, making its pointer operand non-uniform. If the pointer
4501       // operand is used by any instruction other than a memory access, we
4502       // conservatively assume the pointer operand may be non-uniform.
4503       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4504         PossibleNonUniformPtrs.insert(Ptr);
4505 
4506       // If the memory instruction will be vectorized and its pointer operand
4507       // is consecutive-like, or interleaving - the pointer operand should
4508       // remain uniform.
4509       else
4510         ConsecutiveLikePtrs.insert(Ptr);
4511     }
4512 
4513   // Add to the Worklist all consecutive and consecutive-like pointers that
4514   // aren't also identified as possibly non-uniform.
4515   for (auto *V : ConsecutiveLikePtrs)
4516     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) {
4517       LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
4518       Worklist.insert(V);
4519     }
4520 
  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside the Worklist. This ensures that
  // a uniform instruction will only be used by uniform instructions.
4524   unsigned idx = 0;
4525   while (idx != Worklist.size()) {
4526     Instruction *I = Worklist[idx++];
4527 
4528     for (auto OV : I->operand_values()) {
4529       // isOutOfScope operands cannot be uniform instructions.
4530       if (isOutOfScope(OV))
4531         continue;
4532       // If all the users of the operand are uniform, then add the
4533       // operand into the uniform worklist.
4534       auto *OI = cast<Instruction>(OV);
4535       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4536             auto *J = cast<Instruction>(U);
4537             return Worklist.count(J) ||
4538                    (OI == getLoadStorePointerOperand(J) &&
4539                     isUniformDecision(J, VF));
4540           })) {
4541         Worklist.insert(OI);
4542         LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
4543       }
4544     }
4545   }
4546 
4547   // Returns true if Ptr is the pointer operand of a memory access instruction
4548   // I, and I is known to not require scalarization.
4549   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4550     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4551   };
4552 
4553   // For an instruction to be added into Worklist above, all its users inside
4554   // the loop should also be in Worklist. However, this condition cannot be
4555   // true for phi nodes that form a cyclic dependence. We must process phi
4556   // nodes separately. An induction variable will remain uniform if all users
4557   // of the induction variable and induction variable update remain uniform.
4558   // The code below handles both pointer and non-pointer induction variables.
4559   for (auto &Induction : *Legal->getInductionVars()) {
4560     auto *Ind = Induction.first;
4561     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4562 
4563     // Determine if all users of the induction variable are uniform after
4564     // vectorization.
4565     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4566       auto *I = cast<Instruction>(U);
4567       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4568              isVectorizedMemAccessUse(I, Ind);
4569     });
4570     if (!UniformInd)
4571       continue;
4572 
4573     // Determine if all users of the induction variable update instruction are
4574     // uniform after vectorization.
4575     auto UniformIndUpdate =
4576         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4577           auto *I = cast<Instruction>(U);
4578           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4579                  isVectorizedMemAccessUse(I, IndUpdate);
4580         });
4581     if (!UniformIndUpdate)
4582       continue;
4583 
4584     // The induction variable and its update instruction will remain uniform.
4585     Worklist.insert(Ind);
4586     Worklist.insert(IndUpdate);
4587     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
4588     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate
4589                       << "\n");
4590   }
4591 
4592   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4593 }
4594 
4595 void InterleavedAccessInfo::collectConstStrideAccesses(
4596     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
4597     const ValueToValueMap &Strides) {
4598   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
4599 
4600   // Since it's desired that the load/store instructions be maintained in
4601   // "program order" for the interleaved access analysis, we have to visit the
4602   // blocks in the loop in reverse postorder (i.e., in a topological order).
4603   // Such an ordering will ensure that any load/store that may be executed
4604   // before a second load/store will precede the second load/store in
4605   // AccessStrideInfo.
4606   LoopBlocksDFS DFS(TheLoop);
4607   DFS.perform(LI);
4608   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
4609     for (auto &I : *BB) {
4610       auto *LI = dyn_cast<LoadInst>(&I);
4611       auto *SI = dyn_cast<StoreInst>(&I);
4612       if (!LI && !SI)
4613         continue;
4614 
4615       Value *Ptr = getLoadStorePointerOperand(&I);
4616       // We don't check wrapping here because we don't know yet if Ptr will be
4617       // part of a full group or a group with gaps. Checking wrapping for all
4618       // pointers (even those that end up in groups with no gaps) will be overly
4619       // conservative. For full groups, wrapping should be ok since if we would
4620       // wrap around the address space we would do a memory access at nullptr
4621       // even without the transformation. The wrapping checks are therefore
4622       // deferred until after we've formed the interleaved groups.
4623       int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
4624                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
4625 
4626       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
4627       PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4628       uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
4629 
4630       // An alignment of 0 means target ABI alignment.
4631       unsigned Align = getMemInstAlignment(&I);
4632       if (!Align)
4633         Align = DL.getABITypeAlignment(PtrTy->getElementType());
4634 
4635       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
4636     }
4637 }
4638 
4639 // Analyze interleaved accesses and collect them into interleaved load and
4640 // store groups.
4641 //
4642 // When generating code for an interleaved load group, we effectively hoist all
4643 // loads in the group to the location of the first load in program order. When
4644 // generating code for an interleaved store group, we sink all stores to the
4645 // location of the last store. This code motion can change the order of load
4646 // and store instructions and may break dependences.
4647 //
4648 // The code generation strategy mentioned above ensures that we won't violate
4649 // any write-after-read (WAR) dependences.
4650 //
4651 // E.g., for the WAR dependence:  a = A[i];      // (1)
4652 //                                A[i] = b;      // (2)
4653 //
4654 // The store group of (2) is always inserted at or below (2), and the load
4655 // group of (1) is always inserted at or above (1). Thus, the instructions will
4656 // never be reordered. All other dependences are checked to ensure the
4657 // correctness of the instruction reordering.
4658 //
4659 // The algorithm visits all memory accesses in the loop in bottom-up program
4660 // order. Program order is established by traversing the blocks in the loop in
4661 // reverse postorder when collecting the accesses.
4662 //
4663 // We visit the memory accesses in bottom-up order because it can simplify the
4664 // construction of store groups in the presence of write-after-write (WAW)
4665 // dependences.
4666 //
4667 // E.g., for the WAW dependence:  A[i] = a;      // (1)
4668 //                                A[i] = b;      // (2)
4669 //                                A[i + 1] = c;  // (3)
4670 //
4671 // We will first create a store group with (3) and (2). (1) can't be added to
4672 // this group because it and (2) are dependent. However, (1) can be grouped
4673 // with other accesses that may precede it in program order. Note that a
4674 // bottom-up order does not imply that WAW dependences should not be checked.
4675 void InterleavedAccessInfo::analyzeInterleaving() {
4676   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
4677   const ValueToValueMap &Strides = LAI->getSymbolicStrides();
4678 
4679   // Holds all accesses with a constant stride.
4680   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
4681   collectConstStrideAccesses(AccessStrideInfo, Strides);
4682 
4683   if (AccessStrideInfo.empty())
4684     return;
4685 
4686   // Collect the dependences in the loop.
4687   collectDependences();
4688 
4689   // Holds all interleaved store groups temporarily.
4690   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
4691   // Holds all interleaved load groups temporarily.
4692   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
4693 
4694   // Search in bottom-up program order for pairs of accesses (A and B) that can
4695   // form interleaved load or store groups. In the algorithm below, access A
4696   // precedes access B in program order. We initialize a group for B in the
4697   // outer loop of the algorithm, and then in the inner loop, we attempt to
4698   // insert each A into B's group if:
4699   //
4700   //  1. A and B have the same stride,
4701   //  2. A and B have the same memory object size, and
4702   //  3. A belongs in B's group according to its distance from B.
4703   //
4704   // Special care is taken to ensure group formation will not break any
4705   // dependences.
4706   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
4707        BI != E; ++BI) {
4708     Instruction *B = BI->first;
4709     StrideDescriptor DesB = BI->second;
4710 
4711     // Initialize a group for B if it has an allowable stride. Even if we don't
4712     // create a group for B, we continue with the bottom-up algorithm to ensure
4713     // we don't break any of B's dependences.
4714     InterleaveGroup *Group = nullptr;
4715     if (isStrided(DesB.Stride)) {
4716       Group = getInterleaveGroup(B);
4717       if (!Group) {
4718         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
4719                           << '\n');
4720         Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
4721       }
4722       if (B->mayWriteToMemory())
4723         StoreGroups.insert(Group);
4724       else
4725         LoadGroups.insert(Group);
4726     }
4727 
4728     for (auto AI = std::next(BI); AI != E; ++AI) {
4729       Instruction *A = AI->first;
4730       StrideDescriptor DesA = AI->second;
4731 
4732       // Our code motion strategy implies that we can't have dependences
4733       // between accesses in an interleaved group and other accesses located
4734       // between the first and last member of the group. Note that this also
4735       // means that a group can't have more than one member at a given offset.
4736       // The accesses in a group can have dependences with other accesses, but
4737       // we must ensure we don't extend the boundaries of the group such that
4738       // we encompass those dependent accesses.
4739       //
4740       // For example, assume we have the sequence of accesses shown below in a
4741       // stride-2 loop:
4742       //
4743       //  (1, 2) is a group | A[i]   = a;  // (1)
4744       //                    | A[i-1] = b;  // (2) |
4745       //                      A[i-3] = c;  // (3)
4746       //                      A[i]   = d;  // (4) | (2, 4) is not a group
4747       //
4748       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
4749       // but not with (4). If we did, the dependent access (3) would be within
4750       // the boundaries of the (2, 4) group.
4751       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
4752         // If a dependence exists and A is already in a group, we know that A
4753         // must be a store since A precedes B and WAR dependences are allowed.
4754         // Thus, A would be sunk below B. We release A's group to prevent this
4755         // illegal code motion. A will then be free to form another group with
4756         // instructions that precede it.
4757         if (isInterleaved(A)) {
4758           InterleaveGroup *StoreGroup = getInterleaveGroup(A);
4759           StoreGroups.remove(StoreGroup);
4760           releaseGroup(StoreGroup);
4761         }
4762 
4763         // If a dependence exists and A is not already in a group (or it was
4764         // and we just released it), B might be hoisted above A (if B is a
4765         // load) or another store might be sunk below A (if B is a store). In
4766         // either case, we can't add additional instructions to B's group. B
4767         // will only form a group with instructions that it precedes.
4768         break;
4769       }
4770 
4771       // At this point, we've checked for illegal code motion. If either A or B
4772       // isn't strided, there's nothing left to do.
4773       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
4774         continue;
4775 
4776       // Ignore A if it's already in a group or isn't the same kind of memory
4777       // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false, unless we were
      // only asked to produce optimization remarks.
4782       if (isInterleaved(A) || (A->mayReadFromMemory() != B->mayReadFromMemory())
4783           || (A->mayWriteToMemory() != B->mayWriteToMemory()))
4784         continue;
4785 
4786       // Check rules 1 and 2. Ignore A if its stride or size is different from
4787       // that of B.
4788       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
4789         continue;
4790 
      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
4793       if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
4794         continue;
4795 
4796       // Calculate the distance from A to B.
4797       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
4798           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
4799       if (!DistToB)
4800         continue;
4801       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
4802 
4803       // Check rule 3. Ignore A if its distance to B is not a multiple of the
4804       // size.
4805       if (DistanceToB % static_cast<int64_t>(DesB.Size))
4806         continue;
4807 
4808       // Ignore A if either A or B is in a predicated block. Although we
4809       // currently prevent group formation for predicated accesses, we may be
4810       // able to relax this limitation in the future once we handle more
4811       // complicated blocks.
4812       if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
4813         continue;
4814 
4815       // The index of A is the index of B plus A's distance to B in multiples
4816       // of the size.
4817       int IndexA =
4818           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
4819 
4820       // Try to insert A into B's group.
4821       if (Group->insertMember(A, IndexA, DesA.Align)) {
4822         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
4823                           << "    into the interleave group with" << *B
4824                           << '\n');
4825         InterleaveGroupMap[A] = Group;
4826 
4827         // Set the first load in program order as the insert position.
4828         if (A->mayReadFromMemory())
4829           Group->setInsertPos(A);
4830       }
4831     } // Iteration over A accesses.
4832   } // Iteration over B accesses.
4833 
4834   // Remove interleaved store groups with gaps.
4835   for (InterleaveGroup *Group : StoreGroups)
4836     if (Group->getNumMembers() != Group->getFactor()) {
4837       LLVM_DEBUG(
4838           dbgs() << "LV: Invalidate candidate interleaved store group due "
4839                     "to gaps.\n");
4840       releaseGroup(Group);
4841     }
4842   // Remove interleaved groups with gaps (currently only loads) whose memory
4843   // accesses may wrap around. We have to revisit the getPtrStride analysis,
4844   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
4845   // not check wrapping (see documentation there).
4846   // FORNOW we use Assume=false;
4847   // TODO: Change to Assume=true but making sure we don't exceed the threshold
4848   // of runtime SCEV assumptions checks (thereby potentially failing to
4849   // vectorize altogether).
4850   // Additional optional optimizations:
4851   // TODO: If we are peeling the loop and we know that the first pointer doesn't
4852   // wrap then we can deduce that all pointers in the group don't wrap.
4853   // This means that we can forcefully peel the loop in order to only have to
4854   // check the first pointer for no-wrap. When we'll change to use Assume=true
4855   // we'll only need at most one runtime check per interleaved group.
4856   for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
4860     if (Group->getNumMembers() == Group->getFactor())
4861       continue;
4862 
    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
4868     Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
4869     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
4870                       /*ShouldCheckWrap=*/true)) {
4871       LLVM_DEBUG(
4872           dbgs() << "LV: Invalidate candidate interleaved group due to "
4873                     "first group member potentially pointer-wrapping.\n");
4874       releaseGroup(Group);
4875       continue;
4876     }
4877     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
4878     if (LastMember) {
4879       Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
4880       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
4881                         /*ShouldCheckWrap=*/true)) {
4882         LLVM_DEBUG(
4883             dbgs() << "LV: Invalidate candidate interleaved group due to "
4884                       "last group member potentially pointer-wrapping.\n");
4885         releaseGroup(Group);
4886       }
4887     } else {
4888       // Case 3: A non-reversed interleaved load group with gaps: We need
4889       // to execute at least one scalar epilogue iteration. This will ensure
4890       // we don't speculatively access memory out-of-bounds. We only need
4891       // to look for a member at index factor - 1, since every group must have
4892       // a member at index zero.
4893       if (Group->isReverse()) {
4894         LLVM_DEBUG(
4895             dbgs() << "LV: Invalidate candidate interleaved group due to "
4896                       "a reverse access with gaps.\n");
4897         releaseGroup(Group);
4898         continue;
4899       }
4900       LLVM_DEBUG(
4901           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
4902       RequiresScalarEpilogue = true;
4903     }
4904   }
4905 }
4906 
4907 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
4908   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4909     // TODO: It may be useful to do this, since it's still likely to be dynamically
4910     // uniform if the target can skip.
4911     LLVM_DEBUG(
4912         dbgs() << "LV: Not inserting runtime ptr check for divergent target\n");
4913 
4914     ORE->emit(
4915       createMissedAnalysis("CantVersionLoopWithDivergentTarget")
4916       << "runtime pointer checks needed. Not enabled for divergent target");
4917 
4918     return None;
4919   }
4920 
4921   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4922   if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
4923     return computeFeasibleMaxVF(OptForSize, TC);
4924 
4925   if (Legal->getRuntimePointerChecking()->Need) {
4926     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
4927               << "runtime pointer checks needed. Enable vectorization of this "
4928                  "loop with '#pragma clang loop vectorize(enable)' when "
4929                  "compiling with -Os/-Oz");
4930     LLVM_DEBUG(
4931         dbgs()
4932         << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
4933     return None;
4934   }
4935 
4936   // If we optimize the program for size, avoid creating the tail loop.
4937   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4938 
4939   // If the precise trip count is unknown, or is 1, don't try to vectorize.
4940   if (TC < 2) {
4941     ORE->emit(
4942         createMissedAnalysis("UnknownLoopCountComplexCFG")
4943         << "unable to calculate the loop count due to complex control flow");
4944     LLVM_DEBUG(
4945         dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
4946     return None;
4947   }
4948 
4949   unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);
4950 
4951   if (TC % MaxVF != 0) {
4952     // If the trip count that we found modulo the vectorization factor is not
4953     // zero then we require a tail.
4954     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
4955     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
4956     //        smaller MaxVF that does not require a scalar epilog.
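         // For example, a constant trip count of 10 with MaxVF = 4 would leave a
         // tail of 2 iterations, so we would give up on vectorizing under -Os/-Oz.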
4957 
4958     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
4959               << "cannot optimize for size and vectorize at the "
4960                  "same time. Enable vectorization of this loop "
4961                  "with '#pragma clang loop vectorize(enable)' "
4962                  "when compiling with -Os/-Oz");
4963     LLVM_DEBUG(
4964         dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
4965     return None;
4966   }
4967 
4968   return MaxVF;
4969 }
4970 
4971 unsigned
4972 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
4973                                                  unsigned ConstTripCount) {
4974   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4975   unsigned SmallestType, WidestType;
4976   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4977   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
4978 
4979   // Get the maximum safe dependence distance in bits computed by LAA.
4980   // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
4981   // the memory access that is most restrictive (involved in the smallest
4982   // dependence distance).
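       // Illustrative example: a smallest dependence distance that permits
       // MaxVF = 4 for 4-byte (i32) accesses yields
       // MaxSafeRegisterWidth = 4 * 4 * 8 = 128 bits.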
4983   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
4984 
4985   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
4986 
4987   unsigned MaxVectorSize = WidestRegister / WidestType;
4988 
4989   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4990                     << " / " << WidestType << " bits.\n");
4991   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
4992                     << WidestRegister << " bits.\n");
4993 
4994   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
4995                                  " into one vector!");
4996   if (MaxVectorSize == 0) {
4997     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
4998     MaxVectorSize = 1;
4999     return MaxVectorSize;
5000   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5001              isPowerOf2_32(ConstTripCount)) {
5002     // We need to clamp the VF to be the ConstTripCount. There is no point in
5003     // choosing a higher viable VF as done in the loop below.
5004     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5005                       << ConstTripCount << "\n");
5006     MaxVectorSize = ConstTripCount;
5007     return MaxVectorSize;
5008   }
5009 
5010   unsigned MaxVF = MaxVectorSize;
5011   if (TTI.shouldMaximizeVectorBandwidth(OptForSize) ||
5012       (MaximizeBandwidth && !OptForSize)) {
5013     // Collect all viable vectorization factors larger than the default MaxVF
5014     // (i.e. MaxVectorSize).
5015     SmallVector<unsigned, 8> VFs;
5016     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5017     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5018       VFs.push_back(VS);
5019 
5020     // For each VF calculate its register usage.
5021     auto RUs = calculateRegisterUsage(VFs);
5022 
5023     // Select the largest VF which doesn't require more registers than existing
5024     // ones.
5025     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
5026     for (int i = RUs.size() - 1; i >= 0; --i) {
5027       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
5028         MaxVF = VFs[i];
5029         break;
5030       }
5031     }
5032     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5033       if (MaxVF < MinVF) {
5034         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5035                           << ") with target's minimum: " << MinVF << '\n');
5036         MaxVF = MinVF;
5037       }
5038     }
5039   }
5040   return MaxVF;
5041 }
5042 
5043 VectorizationFactor
5044 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5045   float Cost = expectedCost(1).first;
5046   const float ScalarCost = Cost;
5047   unsigned Width = 1;
5048   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5049 
5050   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5051   if (ForceVectorization && MaxVF > 1) {
5052     // Ignore scalar width, because the user explicitly wants vectorization.
5053     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5054     // evaluation.
5055     Cost = std::numeric_limits<float>::max();
5056   }
5057 
5058   for (unsigned i = 2; i <= MaxVF; i *= 2) {
5059     // Notice that the vector loop needs to be executed fewer times, so
5060     // we need to divide the cost of the vector loop by the width of
5061     // the vector elements.
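         // Illustrative example: a scalar loop cost of 8 and a VF = 4 loop cost
         // of 20 give a per-lane cost of 5, so VF = 4 would be preferred over
         // the scalar loop.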
5062     VectorizationCostTy C = expectedCost(i);
5063     float VectorCost = C.first / (float)i;
5064     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5065                       << " costs: " << (int)VectorCost << ".\n");
5066     if (!C.second && !ForceVectorization) {
5067       LLVM_DEBUG(
5068           dbgs() << "LV: Not considering vector loop of width " << i
5069                  << " because it will not generate any vector instructions.\n");
5070       continue;
5071     }
5072     if (VectorCost < Cost) {
5073       Cost = VectorCost;
5074       Width = i;
5075     }
5076   }
5077 
5078   if (!EnableCondStoresVectorization && NumPredStores) {
5079     ORE->emit(createMissedAnalysis("ConditionalStore")
5080               << "store that is conditionally executed prevents vectorization");
5081     LLVM_DEBUG(
5082         dbgs() << "LV: No vectorization. There are conditional stores.\n");
5083     Width = 1;
5084     Cost = ScalarCost;
5085   }
5086 
5087   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5088              << "LV: Vectorization seems to be not beneficial, "
5089              << "but was forced by a user.\n");
5090   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5091   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5092   return Factor;
5093 }
5094 
5095 std::pair<unsigned, unsigned>
5096 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5097   unsigned MinWidth = -1U;
5098   unsigned MaxWidth = 8;
5099   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5100 
5101   // For each block.
5102   for (BasicBlock *BB : TheLoop->blocks()) {
5103     // For each instruction in the loop.
5104     for (Instruction &I : *BB) {
5105       Type *T = I.getType();
5106 
5107       // Skip ignored values.
5108       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
5109         continue;
5110 
5111       // Only examine Loads, Stores and PHINodes.
5112       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5113         continue;
5114 
5115       // Examine PHI nodes that are reduction variables. Update the type to
5116       // account for the recurrence type.
5117       if (auto *PN = dyn_cast<PHINode>(&I)) {
5118         if (!Legal->isReductionVariable(PN))
5119           continue;
5120         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
5121         T = RdxDesc.getRecurrenceType();
5122       }
5123 
5124       // Examine the stored values.
5125       if (auto *ST = dyn_cast<StoreInst>(&I))
5126         T = ST->getValueOperand()->getType();
5127 
5128       // Ignore loaded pointer types and stored pointer types that are not
5129       // vectorizable.
5130       //
5131       // FIXME: The check here attempts to predict whether a load or store will
5132       //        be vectorized. We only know this for certain after a VF has
5133       //        been selected. Here, we assume that if an access can be
5134       //        vectorized, it will be. We should also look at extending this
5135       //        optimization to non-pointer types.
5136       //
5137       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5138           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5139         continue;
5140 
5141       MinWidth = std::min(MinWidth,
5142                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5143       MaxWidth = std::max(MaxWidth,
5144                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5145     }
5146   }
5147 
5148   return {MinWidth, MaxWidth};
5149 }
5150 
5151 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
5152                                                            unsigned VF,
5153                                                            unsigned LoopCost) {
5154   // -- The interleave heuristics --
5155   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5156   // There are many micro-architectural considerations that we can't predict
5157   // at this level. For example, frontend pressure (on decode or fetch) due to
5158   // code size, or the number and capabilities of the execution ports.
5159   //
5160   // We use the following heuristics to select the interleave count:
5161   // 1. If the code has reductions, then we interleave to break the cross
5162   // iteration dependency.
5163   // 2. If the loop is really small, then we interleave to reduce the loop
5164   // overhead.
5165   // 3. We don't interleave if we think that we will spill registers to memory
5166   // due to the increased register pressure.
5167 
5168   // When we optimize for size, we don't interleave.
5169   if (OptForSize)
5170     return 1;
5171 
5172   // The maximum safe dependence distance already limits the VF, so do not interleave.
5173   if (Legal->getMaxSafeDepDistBytes() != -1U)
5174     return 1;
5175 
5176   // Do not interleave loops with a relatively small trip count.
5177   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5178   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
5179     return 1;
5180 
5181   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
5182   LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5183                     << " registers\n");
5184 
5185   if (VF == 1) {
5186     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5187       TargetNumRegisters = ForceTargetNumScalarRegs;
5188   } else {
5189     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5190       TargetNumRegisters = ForceTargetNumVectorRegs;
5191   }
5192 
5193   RegisterUsage R = calculateRegisterUsage({VF})[0];
5194   // We divide by this value below, so assume that we have at least one
5195   // instruction that uses at least one register.
5196   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
5197 
5198   // We calculate the interleave count using the following formula.
5199   // Subtract the number of loop invariants from the number of available
5200   // registers. These registers are used by all of the interleaved instances.
5201   // Next, divide the remaining registers by the number of registers that is
5202   // required by the loop, in order to estimate how many parallel instances
5203   // fit without causing spills. All of this is rounded down if necessary to be
5204   // a power of two. We want power of two interleave count to simplify any
5205   // addressing operations or alignment considerations.
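       // Illustrative example: 16 available registers, 2 loop-invariant registers
       // and a maximum of 3 local users give
       // PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4) = 4 interleaved instances.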
5206   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
5207                               R.MaxLocalUsers);
5208 
5209   // Don't count the induction variable as interleaved.
5210   if (EnableIndVarRegisterHeur)
5211     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
5212                        std::max(1U, (R.MaxLocalUsers - 1)));
5213 
5214   // Clamp the interleave ranges to reasonable counts.
5215   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5216 
5217   // Check if the user has overridden the max.
5218   if (VF == 1) {
5219     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5220       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5221   } else {
5222     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5223       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5224   }
5225 
5226   // If we did not calculate the cost for VF (because the user selected the VF)
5227   // then we calculate the cost of VF here.
5228   if (LoopCost == 0)
5229     LoopCost = expectedCost(VF).first;
5230 
5231   // Clamp the calculated IC to be between 1 and the max interleave count
5232   // that the target allows.
5233   if (IC > MaxInterleaveCount)
5234     IC = MaxInterleaveCount;
5235   else if (IC < 1)
5236     IC = 1;
5237 
5238   // Interleave if we vectorized this loop and there is a reduction that could
5239   // benefit from interleaving.
5240   if (VF > 1 && !Legal->getReductionVars()->empty()) {
5241     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5242     return IC;
5243   }
5244 
5245   // Note that if we've already vectorized the loop we will have done the
5246   // runtime check and so interleaving won't require further checks.
5247   bool InterleavingRequiresRuntimePointerCheck =
5248       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5249 
5250   // We want to interleave small loops in order to reduce the loop overhead and
5251   // potentially expose ILP opportunities.
5252   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5253   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5254     // We assume that the cost overhead is 1 and we use the cost model
5255     // to estimate the cost of the loop and interleave until the cost of the
5256     // loop overhead is about 5% of the cost of the loop.
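         // Illustrative example: if SmallLoopCost were 20 and LoopCost 5, SmallIC
         // would be clamped to min(IC, PowerOf2Floor(20 / 5)) = min(IC, 4).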
5257     unsigned SmallIC =
5258         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5259 
5260     // Interleave until store/load ports (estimated by max interleave count) are
5261     // saturated.
5262     unsigned NumStores = Legal->getNumStores();
5263     unsigned NumLoads = Legal->getNumLoads();
5264     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5265     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5266 
5267     // If we have a scalar reduction (vector reductions are already dealt with
5268     // by this point), we can increase the critical path length if the loop
5269     // we're interleaving is inside another loop. Limit, by default, to 2, so the
5270     // critical path only gets increased by one reduction operation.
5271     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
5272       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5273       SmallIC = std::min(SmallIC, F);
5274       StoresIC = std::min(StoresIC, F);
5275       LoadsIC = std::min(LoadsIC, F);
5276     }
5277 
5278     if (EnableLoadStoreRuntimeInterleave &&
5279         std::max(StoresIC, LoadsIC) > SmallIC) {
5280       LLVM_DEBUG(
5281           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5282       return std::max(StoresIC, LoadsIC);
5283     }
5284 
5285     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5286     return SmallIC;
5287   }
5288 
5289   // Interleave if this is a large loop (small loops are already dealt with by
5290   // this point) that could benefit from interleaving.
5291   bool HasReductions = !Legal->getReductionVars()->empty();
5292   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5293     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5294     return IC;
5295   }
5296 
5297   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5298   return 1;
5299 }
5300 
5301 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5302 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5303   // This function calculates the register usage by measuring the highest number
5304   // of values that are alive at a single location. Obviously, this is a very
5305   // rough estimation. We scan the loop in topological order and
5306   // assign a number to each instruction. We use RPO to ensure that defs are
5307   // met before their users. We assume that each instruction that has in-loop
5308   // users starts an interval. We record every time that an in-loop value is
5309   // used, so we have a list of the first and last occurrences of each
5310   // instruction. Next, we transpose this data structure into a multi map that
5311   // holds the list of intervals that *end* at a specific location. This multi
5312   // map allows us to perform a linear search. We scan the instructions linearly
5313   // and record each time that a new interval starts, by placing it in a set.
5314   // If we find this value in the multi-map then we remove it from the set.
5315   // The max register usage is the maximum size of the set.
5316   // We also search for instructions that are defined outside the loop, but are
5317   // used inside the loop. We need this number separately from the max-interval
5318   // usage number because when we unroll, loop-invariant values do not take
5319   // more registers.
5320   LoopBlocksDFS DFS(TheLoop);
5321   DFS.perform(LI);
5322 
5323   RegisterUsage RU;
5324 
5325   // Each 'key' in the map opens a new interval. The values
5326   // of the map are the index of the 'last seen' usage of the
5327   // instruction that is the key.
5328   using IntervalMap = DenseMap<Instruction *, unsigned>;
5329 
5330   // Maps instruction to its index.
5331   // Maps an index to the instruction at that index.
5332   // Marks the end of each interval.
5333   IntervalMap EndPoint;
5334   // Saves the instructions that are used within the loop.
5335   SmallPtrSet<Instruction *, 8> Ends;
5336   // Saves the list of values that are used in the loop but are
5337   // defined outside the loop, such as arguments and constants.
5338   SmallPtrSet<Value *, 8> LoopInvariants;
5339 
5340   unsigned Index = 0;
5341   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5342     for (Instruction &I : *BB) {
5343       IdxToInstr[Index++] = &I;
5344 
5345       // Save the end location of each USE.
5346       for (Value *U : I.operands()) {
5347         auto *Instr = dyn_cast<Instruction>(U);
5348 
5349         // Ignore non-instruction values such as arguments, constants, etc.
5350         if (!Instr)
5351           continue;
5352 
5353         // If this instruction is outside the loop then record it and continue.
5354         if (!TheLoop->contains(Instr)) {
5355           LoopInvariants.insert(Instr);
5356           continue;
5357         }
5358 
5359         // Overwrite previous end points.
5360         EndPoint[Instr] = Index;
5361         Ends.insert(Instr);
5362       }
5363     }
5364   }
5365 
5366   // Saves the list of intervals that end with the index in 'key'.
5367   using InstrList = SmallVector<Instruction *, 2>;
5368   DenseMap<unsigned, InstrList> TransposeEnds;
5369 
5370   // Transpose the EndPoints to a list of values that end at each index.
5371   for (auto &Interval : EndPoint)
5372     TransposeEnds[Interval.second].push_back(Interval.first);
5373 
5374   SmallPtrSet<Instruction *, 8> OpenIntervals;
5375 
5376   // Get the size of the widest register.
5377   unsigned MaxSafeDepDist = -1U;
5378   if (Legal->getMaxSafeDepDistBytes() != -1U)
5379     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5380   unsigned WidestRegister =
5381       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5382   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5383 
5384   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5385   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
5386 
5387   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5388 
5389   // A lambda that gets the register usage for the given type and VF.
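       // Illustrative example: a 32-bit element type at VF = 8 on a target with
       // 128-bit registers occupies max(1, 8 * 32 / 128) = 2 registers.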
5390   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5391     if (Ty->isTokenTy())
5392       return 0U;
5393     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5394     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5395   };
5396 
5397   for (unsigned int i = 0; i < Index; ++i) {
5398     Instruction *I = IdxToInstr[i];
5399 
5400     // Remove all of the instructions that end at this location.
5401     InstrList &List = TransposeEnds[i];
5402     for (Instruction *ToRemove : List)
5403       OpenIntervals.erase(ToRemove);
5404 
5405     // Ignore instructions that are never used within the loop.
5406     if (Ends.find(I) == Ends.end())
5407       continue;
5408 
5409     // Skip ignored values.
5410     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5411       continue;
5412 
5413     // For each VF find the maximum usage of registers.
5414     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5415       if (VFs[j] == 1) {
5416         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
5417         continue;
5418       }
5419       collectUniformsAndScalars(VFs[j]);
5420       // Count the number of live intervals.
5421       unsigned RegUsage = 0;
5422       for (auto Inst : OpenIntervals) {
5423         // Skip ignored values for VF > 1.
5424         if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() ||
5425             isScalarAfterVectorization(Inst, VFs[j]))
5426           continue;
5427         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
5428       }
5429       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
5430     }
5431 
5432     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5433                       << OpenIntervals.size() << '\n');
5434 
5435     // Add the current instruction to the list of open intervals.
5436     OpenIntervals.insert(I);
5437   }
5438 
5439   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5440     unsigned Invariant = 0;
5441     if (VFs[i] == 1)
5442       Invariant = LoopInvariants.size();
5443     else {
5444       for (auto Inst : LoopInvariants)
5445         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
5446     }
5447 
5448     LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
5449     LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
5450     LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant
5451                       << '\n');
5452 
5453     RU.LoopInvariantRegs = Invariant;
5454     RU.MaxLocalUsers = MaxUsages[i];
5455     RUs[i] = RU;
5456   }
5457 
5458   return RUs;
5459 }
5460 
5461 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5462   // TODO: Cost model for emulated masked load/store is completely
5463   // broken. This hack guides the cost model to use an artificially
5464   // high enough value to practically disable vectorization with such
5465   // operations, except where the previously deployed legality hack allowed
5466   // using very low cost values. This is to avoid regressions coming simply
5467   // from moving the "masked load/store" check from legality to the cost model.
5468   // Masked load/gather emulation was previously never allowed.
5469   // Only a limited amount of masked store/scatter emulation was allowed.
5470   assert(isScalarWithPredication(I) &&
5471          "Expecting a scalar emulated instruction");
5472   return isa<LoadInst>(I) ||
5473          (isa<StoreInst>(I) &&
5474           NumPredStores > NumberOfStoresToPredicate);
5475 }
5476 
5477 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5478   // If we aren't vectorizing the loop, or if we've already collected the
5479   // instructions to scalarize, there's nothing to do. Collection may already
5480   // have occurred if we have a user-selected VF and are now computing the
5481   // expected cost for interleaving.
5482   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5483     return;
5484 
5485   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5486   // not profitable to scalarize any instructions, the presence of VF in the
5487   // map will indicate that we've analyzed it already.
5488   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5489 
5490   // Find all the instructions that are scalar with predication in the loop and
5491   // determine if it would be better to not if-convert the blocks they are in.
5492   // If so, we also record the instructions to scalarize.
5493   for (BasicBlock *BB : TheLoop->blocks()) {
5494     if (!Legal->blockNeedsPredication(BB))
5495       continue;
5496     for (Instruction &I : *BB)
5497       if (isScalarWithPredication(&I)) {
5498         ScalarCostsTy ScalarCosts;
5499         // Do not apply discount logic if hacked cost is needed
5500         // for emulated masked memrefs.
5501         if (!useEmulatedMaskMemRefHack(&I) &&
5502             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5503           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5504         // Remember that BB will remain after vectorization.
5505         PredicatedBBsAfterVectorization.insert(BB);
5506       }
5507   }
5508 }
5509 
5510 int LoopVectorizationCostModel::computePredInstDiscount(
5511     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5512     unsigned VF) {
5513   assert(!isUniformAfterVectorization(PredInst, VF) &&
5514          "Instruction marked uniform-after-vectorization will be predicated");
5515 
5516   // Initialize the discount to zero, meaning that the scalar version and the
5517   // vector version cost the same.
5518   int Discount = 0;
5519 
5520   // Holds instructions to analyze. The instructions we visit are mapped in
5521   // ScalarCosts. Those instructions are the ones that would be scalarized if
5522   // we find that the scalar version costs less.
5523   SmallVector<Instruction *, 8> Worklist;
5524 
5525   // Returns true if the given instruction can be scalarized.
5526   auto canBeScalarized = [&](Instruction *I) -> bool {
5527     // We only attempt to scalarize instructions forming a single-use chain
5528     // from the original predicated block that would otherwise be vectorized.
5529     // Although not strictly necessary, we give up on instructions we know will
5530     // already be scalar to avoid traversing chains that are unlikely to be
5531     // beneficial.
5532     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5533         isScalarAfterVectorization(I, VF))
5534       return false;
5535 
5536     // If the instruction is scalar with predication, it will be analyzed
5537     // separately. We ignore it within the context of PredInst.
5538     if (isScalarWithPredication(I))
5539       return false;
5540 
5541     // If any of the instruction's operands are uniform after vectorization,
5542     // the instruction cannot be scalarized. This prevents, for example, a
5543     // masked load from being scalarized.
5544     //
5545     // We assume we will only emit a value for lane zero of an instruction
5546     // marked uniform after vectorization, rather than VF identical values.
5547     // Thus, if we scalarize an instruction that uses a uniform, we would
5548     // create uses of values corresponding to the lanes we aren't emitting code
5549     // for. This behavior can be changed by allowing getScalarValue to clone
5550     // the lane zero values for uniforms rather than asserting.
5551     for (Use &U : I->operands())
5552       if (auto *J = dyn_cast<Instruction>(U.get()))
5553         if (isUniformAfterVectorization(J, VF))
5554           return false;
5555 
5556     // Otherwise, we can scalarize the instruction.
5557     return true;
5558   };
5559 
5560   // Returns true if an operand that cannot be scalarized must be extracted
5561   // from a vector. We will account for this scalarization overhead below. Note
5562   // that the non-void predicated instructions are placed in their own blocks,
5563   // and their return values are inserted into vectors. Thus, an extract would
5564   // still be required.
5565   auto needsExtract = [&](Instruction *I) -> bool {
5566     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
5567   };
5568 
5569   // Compute the expected cost discount from scalarizing the entire expression
5570   // feeding the predicated instruction. We currently only consider expressions
5571   // that are single-use instruction chains.
5572   Worklist.push_back(PredInst);
5573   while (!Worklist.empty()) {
5574     Instruction *I = Worklist.pop_back_val();
5575 
5576     // If we've already analyzed the instruction, there's nothing to do.
5577     if (ScalarCosts.find(I) != ScalarCosts.end())
5578       continue;
5579 
5580     // Compute the cost of the vector instruction. Note that this cost already
5581     // includes the scalarization overhead of the predicated instruction.
5582     unsigned VectorCost = getInstructionCost(I, VF).first;
5583 
5584     // Compute the cost of the scalarized instruction. This cost is the cost of
5585     // the instruction as if it wasn't if-converted and instead remained in the
5586     // predicated block. We will scale this cost by block probability after
5587     // computing the scalarization overhead.
5588     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5589 
5590     // Compute the scalarization overhead of needed insertelement instructions
5591     // and phi nodes.
5592     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5593       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5594                                                  true, false);
5595       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5596     }
5597 
5598     // Compute the scalarization overhead of needed extractelement
5599     // instructions. For each of the instruction's operands, if the operand can
5600     // be scalarized, add it to the worklist; otherwise, account for the
5601     // overhead.
5602     for (Use &U : I->operands())
5603       if (auto *J = dyn_cast<Instruction>(U.get())) {
5604         assert(VectorType::isValidElementType(J->getType()) &&
5605                "Instruction has non-scalar type");
5606         if (canBeScalarized(J))
5607           Worklist.push_back(J);
5608         else if (needsExtract(J))
5609           ScalarCost += TTI.getScalarizationOverhead(
5610                               ToVectorTy(J->getType(),VF), false, true);
5611       }
5612 
5613     // Scale the total scalar cost by block probability.
5614     ScalarCost /= getReciprocalPredBlockProb();
5615 
5616     // Compute the discount. A non-negative discount means the vector version
5617     // of the instruction costs more, and scalarizing would be beneficial.
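         // Illustrative example (assuming a reciprocal block probability of 2): a
         // vector cost of 10 and an unscaled scalar cost of 16 (scaled to 8) add a
         // discount of 10 - 8 = 2 in favor of scalarization.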
5618     Discount += VectorCost - ScalarCost;
5619     ScalarCosts[I] = ScalarCost;
5620   }
5621 
5622   return Discount;
5623 }
5624 
5625 LoopVectorizationCostModel::VectorizationCostTy
5626 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5627   VectorizationCostTy Cost;
5628 
5629   // For each block.
5630   for (BasicBlock *BB : TheLoop->blocks()) {
5631     VectorizationCostTy BlockCost;
5632 
5633     // For each instruction in the old loop.
5634     for (Instruction &I : BB->instructionsWithoutDebug()) {
5635       // Skip ignored values.
5636       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5637           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5638         continue;
5639 
5640       VectorizationCostTy C = getInstructionCost(&I, VF);
5641 
5642       // Check if we should override the cost.
5643       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5644         C.first = ForceTargetInstructionCost;
5645 
5646       BlockCost.first += C.first;
5647       BlockCost.second |= C.second;
5648       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5649                         << " for VF " << VF << " For instruction: " << I
5650                         << '\n');
5651     }
5652 
5653     // If we are vectorizing a predicated block, it will have been
5654     // if-converted. This means that the block's instructions (aside from
5655     // stores and instructions that may divide by zero) will now be
5656     // unconditionally executed. For the scalar case, we may not always execute
5657     // the predicated block. Thus, scale the block's cost by the probability of
5658     // executing it.
5659     if (VF == 1 && Legal->blockNeedsPredication(BB))
5660       BlockCost.first /= getReciprocalPredBlockProb();
5661 
5662     Cost.first += BlockCost.first;
5663     Cost.second |= BlockCost.second;
5664   }
5665 
5666   return Cost;
5667 }
5668 
5669 /// Gets Address Access SCEV after verifying that the access pattern
5670 /// is loop invariant except for the induction variable dependence.
5671 ///
5672 /// This SCEV can be sent to the Target in order to estimate the address
5673 /// calculation cost.
5674 static const SCEV *getAddressAccessSCEV(
5675               Value *Ptr,
5676               LoopVectorizationLegality *Legal,
5677               PredicatedScalarEvolution &PSE,
5678               const Loop *TheLoop) {
5679 
5680   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5681   if (!Gep)
5682     return nullptr;
5683 
5684   // We are looking for a gep with all loop invariant indices except for one
5685   // which should be an induction variable.
5686   auto SE = PSE.getSE();
5687   unsigned NumOperands = Gep->getNumOperands();
5688   for (unsigned i = 1; i < NumOperands; ++i) {
5689     Value *Opd = Gep->getOperand(i);
5690     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5691         !Legal->isInductionVariable(Opd))
5692       return nullptr;
5693   }
5694 
5695   // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5696   return PSE.getSCEV(Ptr);
5697 }
5698 
5699 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5700   return Legal->hasStride(I->getOperand(0)) ||
5701          Legal->hasStride(I->getOperand(1));
5702 }
5703 
5704 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5705                                                                  unsigned VF) {
5706   Type *ValTy = getMemInstValueType(I);
5707   auto SE = PSE.getSE();
5708 
5709   unsigned Alignment = getMemInstAlignment(I);
5710   unsigned AS = getMemInstAddressSpace(I);
5711   Value *Ptr = getLoadStorePointerOperand(I);
5712   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5713 
5714   // Figure out whether the access is strided and get the stride value
5715   // if it's known at compile time.
5716   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5717 
5718   // Get the cost of the scalar memory instruction and address computation.
5719   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5720 
5721   Cost += VF *
5722           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5723                               AS, I);
5724 
5725   // Get the overhead of the extractelement and insertelement instructions
5726   // we might create due to scalarization.
5727   Cost += getScalarizationOverhead(I, VF, TTI);
5728 
5729   // If we have a predicated store, it may not be executed for each vector
5730   // lane. Scale the cost by the probability of executing the predicated
5731   // block.
5732   if (isScalarWithPredication(I)) {
5733     Cost /= getReciprocalPredBlockProb();
5734 
5735     if (useEmulatedMaskMemRefHack(I))
5736       // Artificially setting to a high enough value to practically disable
5737       // vectorization with such operations.
5738       Cost = 3000000;
5739   }
5740 
5741   return Cost;
5742 }
5743 
5744 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5745                                                              unsigned VF) {
5746   Type *ValTy = getMemInstValueType(I);
5747   Type *VectorTy = ToVectorTy(ValTy, VF);
5748   unsigned Alignment = getMemInstAlignment(I);
5749   Value *Ptr = getLoadStorePointerOperand(I);
5750   unsigned AS = getMemInstAddressSpace(I);
5751   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5752 
5753   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5754          "Stride should be 1 or -1 for consecutive memory access");
5755   unsigned Cost = 0;
5756   if (Legal->isMaskRequired(I))
5757     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5758   else
5759     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5760 
5761   bool Reverse = ConsecutiveStride < 0;
5762   if (Reverse)
5763     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5764   return Cost;
5765 }
5766 
5767 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5768                                                          unsigned VF) {
5769   LoadInst *LI = cast<LoadInst>(I);
5770   Type *ValTy = LI->getType();
5771   Type *VectorTy = ToVectorTy(ValTy, VF);
5772   unsigned Alignment = LI->getAlignment();
5773   unsigned AS = LI->getPointerAddressSpace();
5774 
5775   return TTI.getAddressComputationCost(ValTy) +
5776          TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5777          TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5778 }
5779 
5780 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5781                                                           unsigned VF) {
5782   Type *ValTy = getMemInstValueType(I);
5783   Type *VectorTy = ToVectorTy(ValTy, VF);
5784   unsigned Alignment = getMemInstAlignment(I);
5785   Value *Ptr = getLoadStorePointerOperand(I);
5786 
5787   return TTI.getAddressComputationCost(VectorTy) +
5788          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5789                                     Legal->isMaskRequired(I), Alignment);
5790 }
5791 
5792 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5793                                                             unsigned VF) {
5794   Type *ValTy = getMemInstValueType(I);
5795   Type *VectorTy = ToVectorTy(ValTy, VF);
5796   unsigned AS = getMemInstAddressSpace(I);
5797 
5798   auto Group = getInterleavedAccessGroup(I);
5799   assert(Group && "Failed to get an interleaved access group.");
5800 
5801   unsigned InterleaveFactor = Group->getFactor();
5802   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
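       // For example, a factor-2 interleave group of i32 accesses at VF = 4 is
       // costed as a single wide operation on an <8 x i32> vector.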
5803 
5804   // Holds the indices of existing members in an interleaved load group.
5805   // An interleaved store group doesn't need this as it doesn't allow gaps.
5806   SmallVector<unsigned, 4> Indices;
5807   if (isa<LoadInst>(I)) {
5808     for (unsigned i = 0; i < InterleaveFactor; i++)
5809       if (Group->getMember(i))
5810         Indices.push_back(i);
5811   }
5812 
5813   // Calculate the cost of the whole interleaved group.
5814   unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
5815                                                  Group->getFactor(), Indices,
5816                                                  Group->getAlignment(), AS);
5817 
5818   if (Group->isReverse())
5819     Cost += Group->getNumMembers() *
5820             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5821   return Cost;
5822 }
5823 
5824 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5825                                                               unsigned VF) {
5826   // Calculate scalar cost only. Vectorization cost should be ready at this
5827   // moment.
5828   if (VF == 1) {
5829     Type *ValTy = getMemInstValueType(I);
5830     unsigned Alignment = getMemInstAlignment(I);
5831     unsigned AS = getMemInstAddressSpace(I);
5832 
5833     return TTI.getAddressComputationCost(ValTy) +
5834            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5835   }
5836   return getWideningCost(I, VF);
5837 }
5838 
5839 LoopVectorizationCostModel::VectorizationCostTy
5840 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5841   // If we know that this instruction will remain uniform, check the cost of
5842   // the scalar version.
5843   if (isUniformAfterVectorization(I, VF))
5844     VF = 1;
5845 
5846   if (VF > 1 && isProfitableToScalarize(I, VF))
5847     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5848 
5849   // Forced scalars do not have any scalarization overhead.
5850   auto ForcedScalar = ForcedScalars.find(VF);
5851   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
5852     auto InstSet = ForcedScalar->second;
5853     if (InstSet.find(I) != InstSet.end())
5854       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5855   }
5856 
5857   Type *VectorTy;
5858   unsigned C = getInstructionCost(I, VF, VectorTy);
5859 
5860   bool TypeNotScalarized =
5861       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5862   return VectorizationCostTy(C, TypeNotScalarized);
5863 }
5864 
5865 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
5866   if (VF == 1)
5867     return;
5868   NumPredStores = 0;
5869   for (BasicBlock *BB : TheLoop->blocks()) {
5870     // For each instruction in the old loop.
5871     for (Instruction &I : *BB) {
5872       Value *Ptr = getLoadStorePointerOperand(&I);
5873       if (!Ptr)
5874         continue;
5875 
5876       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
5877         NumPredStores++;
5878       if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
5879         // Scalar load + broadcast
5880         unsigned Cost = getUniformMemOpCost(&I, VF);
5881         setWideningDecision(&I, VF, CM_Scalarize, Cost);
5882         continue;
5883       }
5884 
5885       // We assume that widening is the best solution when possible.
5886       if (memoryInstructionCanBeWidened(&I, VF)) {
5887         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
5888         int ConsecutiveStride =
5889                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
5890         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5891                "Expected consecutive stride.");
5892         InstWidening Decision =
5893             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5894         setWideningDecision(&I, VF, Decision, Cost);
5895         continue;
5896       }
5897 
5898       // Choose between Interleaving, Gather/Scatter or Scalarization.
5899       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
5900       unsigned NumAccesses = 1;
5901       if (isAccessInterleaved(&I)) {
5902         auto Group = getInterleavedAccessGroup(&I);
5903         assert(Group && "Failed to get an interleaved access group.");
5904 
5905         // Make one decision for the whole group.
5906         if (getWideningDecision(&I, VF) != CM_Unknown)
5907           continue;
5908 
5909         NumAccesses = Group->getNumMembers();
5910         InterleaveCost = getInterleaveGroupCost(&I, VF);
5911       }
5912 
5913       unsigned GatherScatterCost =
5914           isLegalGatherOrScatter(&I)
5915               ? getGatherScatterCost(&I, VF) * NumAccesses
5916               : std::numeric_limits<unsigned>::max();
5917 
5918       unsigned ScalarizationCost =
5919           getMemInstScalarizationCost(&I, VF) * NumAccesses;
5920 
5921       // Choose better solution for the current VF,
5922       // write down this decision and use it during vectorization.
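           // For example, if the interleave cost is 6, the gather/scatter cost 8
           // and the scalarization cost 12, we record CM_Interleave with a cost of 6.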
5923       unsigned Cost;
5924       InstWidening Decision;
5925       if (InterleaveCost <= GatherScatterCost &&
5926           InterleaveCost < ScalarizationCost) {
5927         Decision = CM_Interleave;
5928         Cost = InterleaveCost;
5929       } else if (GatherScatterCost < ScalarizationCost) {
5930         Decision = CM_GatherScatter;
5931         Cost = GatherScatterCost;
5932       } else {
5933         Decision = CM_Scalarize;
5934         Cost = ScalarizationCost;
5935       }
5936       // If the instruction belongs to an interleave group, the whole group
5937       // receives the same decision. The whole group receives the cost, but
5938       // the cost will actually be assigned to one instruction.
5939       if (auto Group = getInterleavedAccessGroup(&I))
5940         setWideningDecision(Group, VF, Decision, Cost);
5941       else
5942         setWideningDecision(&I, VF, Decision, Cost);
5943     }
5944   }
5945 
5946   // Make sure that any load of an address and any other address computation
5947   // remains scalar unless there is gather/scatter support. This avoids
5948   // inevitable extracts into address registers, and also has the benefit of
5949   // activating LSR more, since that pass can't optimize vectorized
5950   // addresses.
5951   if (TTI.prefersVectorizedAddressing())
5952     return;
5953 
5954   // Start with all scalar pointer uses.
5955   SmallPtrSet<Instruction *, 8> AddrDefs;
5956   for (BasicBlock *BB : TheLoop->blocks())
5957     for (Instruction &I : *BB) {
5958       Instruction *PtrDef =
5959         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5960       if (PtrDef && TheLoop->contains(PtrDef) &&
5961           getWideningDecision(&I, VF) != CM_GatherScatter)
5962         AddrDefs.insert(PtrDef);
5963     }
5964 
5965   // Add all instructions used to generate the addresses.
5966   SmallVector<Instruction *, 4> Worklist;
5967   for (auto *I : AddrDefs)
5968     Worklist.push_back(I);
5969   while (!Worklist.empty()) {
5970     Instruction *I = Worklist.pop_back_val();
5971     for (auto &Op : I->operands())
5972       if (auto *InstOp = dyn_cast<Instruction>(Op))
5973         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5974             AddrDefs.insert(InstOp).second)
5975           Worklist.push_back(InstOp);
5976   }
5977 
5978   for (auto *I : AddrDefs) {
5979     if (isa<LoadInst>(I)) {
5980       // Setting the desired widening decision should ideally be handled
5981       // by the cost functions, but since this involves the task of finding out
5982       // if the loaded register is involved in an address computation, it is
5983       // instead changed here when we know this is the case.
5984       InstWidening Decision = getWideningDecision(I, VF);
5985       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5986         // Scalarize a widened load of address.
5987         setWideningDecision(I, VF, CM_Scalarize,
5988                             (VF * getMemoryInstructionCost(I, 1)));
5989       else if (auto Group = getInterleavedAccessGroup(I)) {
5990         // Scalarize an interleave group of address loads.
5991         for (unsigned I = 0; I < Group->getFactor(); ++I) {
5992           if (Instruction *Member = Group->getMember(I))
5993             setWideningDecision(Member, VF, CM_Scalarize,
5994                                 (VF * getMemoryInstructionCost(Member, 1)));
5995         }
5996       }
5997     } else
5998       // Make sure I gets scalarized, with a cost estimate that does not
5999       // include scalarization overhead.
6000       ForcedScalars[VF].insert(I);
6001   }
6002 }
6003 
6004 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6005                                                         unsigned VF,
6006                                                         Type *&VectorTy) {
6007   Type *RetTy = I->getType();
6008   if (canTruncateToMinimalBitwidth(I, VF))
6009     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6010   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6011   auto SE = PSE.getSE();
6012 
6013   // TODO: We need to estimate the cost of intrinsic calls.
6014   switch (I->getOpcode()) {
6015   case Instruction::GetElementPtr:
6016     // We mark this instruction as zero-cost because the cost of GEPs in
6017     // vectorized code depends on whether the corresponding memory instruction
6018     // is scalarized or not. Therefore, we handle GEPs with the memory
6019     // instruction cost.
6020     return 0;
6021   case Instruction::Br: {
6022     // In cases of scalarized and predicated instructions, there will be VF
6023     // predicated blocks in the vectorized loop. Each branch around these
6024     // blocks also requires an extract of its vector compare i1 element.
6025     bool ScalarPredicatedBB = false;
6026     BranchInst *BI = cast<BranchInst>(I);
6027     if (VF > 1 && BI->isConditional() &&
6028         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
6029              PredicatedBBsAfterVectorization.end() ||
6030          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
6031              PredicatedBBsAfterVectorization.end()))
6032       ScalarPredicatedBB = true;
6033 
6034     if (ScalarPredicatedBB) {
6035       // Return cost for branches around scalarized and predicated blocks.
6036       Type *Vec_i1Ty =
6037           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6038       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
6039               (TTI.getCFInstrCost(Instruction::Br) * VF));
6040     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6041       // The back-edge branch will remain, as will all scalar branches.
6042       return TTI.getCFInstrCost(Instruction::Br);
6043     else
6044       // This branch will be eliminated by if-conversion.
6045       return 0;
6046     // Note: We currently assume zero cost for an unconditional branch inside
6047     // a predicated block since it will become a fall-through, although we
6048     // may decide in the future to call TTI for all branches.
6049   }
6050   case Instruction::PHI: {
6051     auto *Phi = cast<PHINode>(I);
6052 
6053     // First-order recurrences are replaced by vector shuffles inside the loop.
6054     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6055       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6056                                 VectorTy, VF - 1, VectorTy);
6057 
6058     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6059     // converted into select instructions. We require N - 1 selects per phi
6060     // node, where N is the number of incoming values.
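         // For example, a phi node merging values from three predecessors is
         // costed as two vector selects.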
6061     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6062       return (Phi->getNumIncomingValues() - 1) *
6063              TTI.getCmpSelInstrCost(
6064                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6065                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
6066 
6067     return TTI.getCFInstrCost(Instruction::PHI);
6068   }
6069   case Instruction::UDiv:
6070   case Instruction::SDiv:
6071   case Instruction::URem:
6072   case Instruction::SRem:
6073     // If we have a predicated instruction, it may not be executed for each
6074     // vector lane. Get the scalarization cost and scale this amount by the
6075     // probability of executing the predicated block. If the instruction is not
6076     // predicated, we fall through to the next case.
6077     if (VF > 1 && isScalarWithPredication(I)) {
6078       unsigned Cost = 0;
6079 
6080       // These instructions have a non-void type, so account for the phi nodes
6081       // that we will create. This cost is likely to be zero. The phi node
6082       // cost, if any, should be scaled by the block probability because it
6083       // models a copy at the end of each predicated block.
6084       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6085 
6086       // The cost of the non-predicated instruction.
6087       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
6088 
6089       // The cost of insertelement and extractelement instructions needed for
6090       // scalarization.
6091       Cost += getScalarizationOverhead(I, VF, TTI);
6092 
6093       // Scale the cost by the probability of executing the predicated blocks.
6094       // This assumes the predicated block for each vector lane is equally
6095       // likely.
6096       return Cost / getReciprocalPredBlockProb();
6097     }
6098     LLVM_FALLTHROUGH;
6099   case Instruction::Add:
6100   case Instruction::FAdd:
6101   case Instruction::Sub:
6102   case Instruction::FSub:
6103   case Instruction::Mul:
6104   case Instruction::FMul:
6105   case Instruction::FDiv:
6106   case Instruction::FRem:
6107   case Instruction::Shl:
6108   case Instruction::LShr:
6109   case Instruction::AShr:
6110   case Instruction::And:
6111   case Instruction::Or:
6112   case Instruction::Xor: {
6113     // Since we will replace the stride by 1 the multiplication should go away.
6114     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6115       return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
6118     TargetTransformInfo::OperandValueKind Op1VK =
6119         TargetTransformInfo::OK_AnyValue;
6120     TargetTransformInfo::OperandValueKind Op2VK =
6121         TargetTransformInfo::OK_AnyValue;
6122     TargetTransformInfo::OperandValueProperties Op1VP =
6123         TargetTransformInfo::OP_None;
6124     TargetTransformInfo::OperandValueProperties Op2VP =
6125         TargetTransformInfo::OP_None;
6126     Value *Op2 = I->getOperand(1);
6127 
    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
6134     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
6135       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
6136       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
6137       if (SplatValue) {
6138         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
6139         if (CInt && CInt->getValue().isPowerOf2())
6140           Op2VP = TargetTransformInfo::OP_PowerOf2;
6141         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
6142       }
6143     } else if (Legal->isUniform(Op2)) {
6144       Op2VK = TargetTransformInfo::OK_UniformValue;
6145     }
6146     SmallVector<const Value *, 4> Operands(I->operand_values());
6147     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6148     return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
6149                                           Op2VK, Op1VP, Op2VP, Operands);
6150   }
6151   case Instruction::Select: {
6152     SelectInst *SI = cast<SelectInst>(I);
6153     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6154     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6155     Type *CondTy = SI->getCondition()->getType();
6156     if (!ScalarCond)
6157       CondTy = VectorType::get(CondTy, VF);
6158 
6159     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
6160   }
6161   case Instruction::ICmp:
6162   case Instruction::FCmp: {
6163     Type *ValTy = I->getOperand(0)->getType();
6164     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6165     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6166       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6167     VectorTy = ToVectorTy(ValTy, VF);
6168     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
6169   }
6170   case Instruction::Store:
6171   case Instruction::Load: {
6172     unsigned Width = VF;
6173     if (Width > 1) {
6174       InstWidening Decision = getWideningDecision(I, Width);
6175       assert(Decision != CM_Unknown &&
6176              "CM decision should be taken at this point");
6177       if (Decision == CM_Scalarize)
6178         Width = 1;
6179     }
6180     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6181     return getMemoryInstructionCost(I, VF);
6182   }
6183   case Instruction::ZExt:
6184   case Instruction::SExt:
6185   case Instruction::FPToUI:
6186   case Instruction::FPToSI:
6187   case Instruction::FPExt:
6188   case Instruction::PtrToInt:
6189   case Instruction::IntToPtr:
6190   case Instruction::SIToFP:
6191   case Instruction::UIToFP:
6192   case Instruction::Trunc:
6193   case Instruction::FPTrunc:
6194   case Instruction::BitCast: {
6195     // We optimize the truncation of induction variables having constant
6196     // integer steps. The cost of these truncations is the same as the scalar
6197     // operation.
6198     if (isOptimizableIVTruncate(I, VF)) {
6199       auto *Trunc = cast<TruncInst>(I);
6200       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6201                                   Trunc->getSrcTy(), Trunc);
6202     }
6203 
6204     Type *SrcScalarTy = I->getOperand(0)->getType();
6205     Type *SrcVecTy =
6206         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6207     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6211       //
6212       // Calculate the modified src and dest types.
6213       Type *MinVecTy = VectorTy;
6214       if (I->getOpcode() == Instruction::Trunc) {
6215         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6216         VectorTy =
6217             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6218       } else if (I->getOpcode() == Instruction::ZExt ||
6219                  I->getOpcode() == Instruction::SExt) {
6220         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6221         VectorTy =
6222             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6223       }
6224     }
6225 
6226     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6227     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6228   }
6229   case Instruction::Call: {
6230     bool NeedToScalarize;
6231     CallInst *CI = cast<CallInst>(I);
6232     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
6233     if (getVectorIntrinsicIDForCall(CI, TLI))
6234       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
6235     return CallCost;
6236   }
6237   default:
6238     // The cost of executing VF copies of the scalar instruction. This opcode
6239     // is unknown. Assume that it is the same as 'mul'.
6240     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6241            getScalarizationOverhead(I, VF, TTI);
6242   } // end of switch.
6243 }
6244 
6245 char LoopVectorize::ID = 0;
6246 
6247 static const char lv_name[] = "Loop Vectorization";
6248 
6249 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6250 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6251 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6252 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6253 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6254 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6255 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6256 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6257 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6258 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6259 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6260 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6261 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6262 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6263 
6264 namespace llvm {
6265 
6266 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
6267   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
6268 }
6269 
6270 } // end namespace llvm
6271 
6272 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6273   // Check if the pointer operand of a load or store instruction is
6274   // consecutive.
6275   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6276     return Legal->isConsecutivePtr(Ptr);
6277   return false;
6278 }
6279 
6280 void LoopVectorizationCostModel::collectValuesToIgnore() {
6281   // Ignore ephemeral values.
6282   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6283 
6284   // Ignore type-promoting instructions we identified during reduction
6285   // detection.
6286   for (auto &Reduction : *Legal->getReductionVars()) {
6287     RecurrenceDescriptor &RedDes = Reduction.second;
6288     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6289     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6290   }
6291   // Ignore type-casting instructions we identified during induction
6292   // detection.
6293   for (auto &Induction : *Legal->getInductionVars()) {
6294     InductionDescriptor &IndDes = Induction.second;
6295     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6296     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6297   }
6298 }
6299 
6300 VectorizationFactor
6301 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize,
6302                                                 unsigned UserVF) {
6303   // Width 1 means no vectorization, cost 0 means uncomputed cost.
6304   const VectorizationFactor NoVectorization = {1U, 0U};
6305 
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
6310   if (!OrigLoop->empty()) {
6311     // TODO: If UserVF is not provided, we set UserVF to 4 for stress testing.
6312     // This won't be necessary when UserVF is not required in the VPlan-native
6313     // path.
6314     if (VPlanBuildStressTest && !UserVF)
6315       UserVF = 4;
6316 
6317     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6318     assert(UserVF && "Expected UserVF for outer loop vectorization.");
6319     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6320     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6321     buildVPlans(UserVF, UserVF);
6322 
6323     // For VPlan build stress testing, we bail out after VPlan construction.
6324     if (VPlanBuildStressTest)
6325       return NoVectorization;
6326 
6327     return {UserVF, 0};
6328   }
6329 
6330   LLVM_DEBUG(
6331       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6332                 "VPlan-native path.\n");
6333   return NoVectorization;
6334 }
6335 
6336 VectorizationFactor
6337 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
6338   assert(OrigLoop->empty() && "Inner loop expected.");
6339   // Width 1 means no vectorization, cost 0 means uncomputed cost.
6340   const VectorizationFactor NoVectorization = {1U, 0U};
6341   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
6342   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
6343     return NoVectorization;
6344 
6345   if (UserVF) {
6346     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6347     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6348     // Collect the instructions (and their associated costs) that will be more
6349     // profitable to scalarize.
6350     CM.selectUserVectorizationFactor(UserVF);
6351     buildVPlansWithVPRecipes(UserVF, UserVF);
6352     LLVM_DEBUG(printPlans(dbgs()));
6353     return {UserVF, 0};
6354   }
6355 
6356   unsigned MaxVF = MaybeMaxVF.getValue();
6357   assert(MaxVF != 0 && "MaxVF is zero.");
6358 
6359   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6360     // Collect Uniform and Scalar instructions after vectorization with VF.
6361     CM.collectUniformsAndScalars(VF);
6362 
6363     // Collect the instructions (and their associated costs) that will be more
6364     // profitable to scalarize.
6365     if (VF > 1)
6366       CM.collectInstsToScalarize(VF);
6367   }
6368 
6369   buildVPlansWithVPRecipes(1, MaxVF);
6370   LLVM_DEBUG(printPlans(dbgs()));
6371   if (MaxVF == 1)
6372     return NoVectorization;
6373 
6374   // Select the optimal vectorization factor.
6375   return CM.selectVectorizationFactor(MaxVF);
6376 }
6377 
6378 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6379   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6380                     << '\n');
6381   BestVF = VF;
6382   BestUF = UF;
6383 
6384   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6385     return !Plan->hasVF(VF);
6386   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6388 }
6389 
6390 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6391                                            DominatorTree *DT) {
6392   // Perform the actual loop transformation.
6393 
6394   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6395   VPCallbackILV CallbackILV(ILV);
6396 
6397   VPTransformState State{BestVF, BestUF,      LI,
6398                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6399                          &ILV,   CallbackILV};
6400   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6401 
6402   //===------------------------------------------------===//
6403   //
  // Notice: any optimization or new instruction that goes
6405   // into the code below should also be implemented in
6406   // the cost-model.
6407   //
6408   //===------------------------------------------------===//
6409 
6410   // 2. Copy and widen instructions from the old loop into the new loop.
6411   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6412   VPlans.front()->execute(&State);
6413 
6414   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6415   //    predication, updating analyses.
6416   ILV.fixVectorizedLoop();
6417 }
6418 
6419 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6420     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6421   BasicBlock *Latch = OrigLoop->getLoopLatch();
6422 
6423   // We create new control-flow for the vectorized loop, so the original
6424   // condition will be dead after vectorization if it's only used by the
6425   // branch.
6426   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6427   if (Cmp && Cmp->hasOneUse())
6428     DeadInstructions.insert(Cmp);
6429 
6430   // We create new "steps" for induction variable updates to which the original
6431   // induction variables map. An original update instruction will be dead if
6432   // all its users except the induction variable are dead.
6433   for (auto &Induction : *Legal->getInductionVars()) {
6434     PHINode *Ind = Induction.first;
6435     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6436     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6437           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6438                                  DeadInstructions.end();
6439         }))
6440       DeadInstructions.insert(IndUpdate);
6441 
    // We also record as "Dead" the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
6444     // vectorized loop because we have proven that, under a proper runtime
6445     // test guarding the vectorized loop, the value of the phi, and the casted
6446     // value of the phi, are the same. The last instruction in this casting chain
6447     // will get its scalar/vector/widened def from the scalar/vector/widened def
6448     // of the respective phi node. Any other casts in the induction def-use chain
6449     // have no other uses outside the phi update chain, and will be ignored.
6450     InductionDescriptor &IndDes = Induction.second;
6451     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6452     DeadInstructions.insert(Casts.begin(), Casts.end());
6453   }
6454 }
6455 
6456 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6457 
6458 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6459 
6460 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6461                                         Instruction::BinaryOps BinOp) {
6462   // When unrolling and the VF is 1, we only need to add a simple scalar.
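  // That is, part StartIdx gets the value Val + StartIdx * Step (using BinOp
  // instead of the add for floating-point inductions).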
6463   Type *Ty = Val->getType();
6464   assert(!Ty->isVectorTy() && "Val must be a scalar");
6465 
6466   if (Ty->isFloatingPointTy()) {
6467     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6468 
6469     // Floating point operations had to be 'fast' to enable the unrolling.
6470     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6471     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6472   }
6473   Constant *C = ConstantInt::get(Ty, StartIdx);
6474   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6475 }
6476 
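/// Attach "llvm.loop.unroll.runtime.disable" metadata to \p L, unless loop
/// unrolling is already explicitly disabled. The loop-ID node keeps its
/// customary self-referential first operand, e.g.:
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.unroll.runtime.disable"}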
6477 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6478   SmallVector<Metadata *, 4> MDs;
6479   // Reserve first location for self reference to the LoopID metadata node.
6480   MDs.push_back(nullptr);
6481   bool IsUnrollMetadata = false;
6482   MDNode *LoopID = L->getLoopID();
6483   if (LoopID) {
6484     // First find existing loop unrolling disable metadata.
6485     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6486       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6487       if (MD) {
6488         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6489         IsUnrollMetadata =
6490             S && S->getString().startswith("llvm.loop.unroll.disable");
6491       }
6492       MDs.push_back(LoopID->getOperand(i));
6493     }
6494   }
6495 
6496   if (!IsUnrollMetadata) {
6497     // Add runtime unroll disable metadata.
6498     LLVMContext &Context = L->getHeader()->getContext();
6499     SmallVector<Metadata *, 1> DisableOperands;
6500     DisableOperands.push_back(
6501         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6502     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6503     MDs.push_back(DisableNode);
6504     MDNode *NewLoopID = MDNode::get(Context, MDs);
6505     // Set operand 0 to refer to the loop id itself.
6506     NewLoopID->replaceOperandWith(0, NewLoopID);
6507     L->setLoopID(NewLoopID);
6508   }
6509 }
6510 
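/// Test \p Predicate on \p Range: evaluate it at Range.Start and keep doubling
/// the VF while the answer stays the same, clamping Range.End down to the
/// first VF whose answer differs. The return value is Predicate(Range.Start),
/// which then holds for every VF remaining in the range. For example, given
/// Range = {2, 16} and a predicate that is true for VF=2 and VF=4 but false
/// for VF=8, Range becomes {2, 8} and true is returned.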
6511 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6512     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6513   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6514   bool PredicateAtRangeStart = Predicate(Range.Start);
6515 
6516   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6517     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6518       Range.End = TmpVF;
6519       break;
6520     }
6521 
6522   return PredicateAtRangeStart;
6523 }
6524 
/// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VFs starting at a given VF and extending it as much as possible. Each
6528 /// vectorization decision can potentially shorten this sub-range during
6529 /// buildVPlan().
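/// For example, with MinVF = 2 and MaxVF = 16 this might produce one VPlan
/// covering VFs {2, 4} and a second one covering VFs {8, 16}, depending on
/// where the vectorization decisions change.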
6530 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6531   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6532     VFRange SubRange = {VF, MaxVF + 1};
6533     VPlans.push_back(buildVPlan(SubRange));
6534     VF = SubRange.End;
6535   }
6536 }
6537 
6538 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6539                                          VPlanPtr &Plan) {
6540   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6541 
6542   // Look for cached value.
6543   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6544   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6545   if (ECEntryIt != EdgeMaskCache.end())
6546     return ECEntryIt->second;
6547 
6548   VPValue *SrcMask = createBlockInMask(Src, Plan);
6549 
6550   // The terminator has to be a branch inst!
6551   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6552   assert(BI && "Unexpected terminator found");
6553 
6554   if (!BI->isConditional())
6555     return EdgeMaskCache[Edge] = SrcMask;
6556 
6557   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6558   assert(EdgeMask && "No Edge Mask found for condition");
6559 
6560   if (BI->getSuccessor(0) != Dst)
6561     EdgeMask = Builder.createNot(EdgeMask);
6562 
6563   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6564     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6565 
6566   return EdgeMaskCache[Edge] = EdgeMask;
6567 }
6568 
6569 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6570   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6571 
6572   // Look for cached value.
6573   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6574   if (BCEntryIt != BlockMaskCache.end())
6575     return BCEntryIt->second;
6576 
6577   // All-one mask is modelled as no-mask following the convention for masked
6578   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6579   VPValue *BlockMask = nullptr;
6580 
6581   // Loop incoming mask is all-one.
6582   if (OrigLoop->getHeader() == BB)
6583     return BlockMaskCache[BB] = BlockMask;
6584 
6585   // This is the block mask. We OR all incoming edges.
6586   for (auto *Predecessor : predecessors(BB)) {
6587     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6588     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6589       return BlockMaskCache[BB] = EdgeMask;
6590 
6591     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6592       BlockMask = EdgeMask;
6593       continue;
6594     }
6595 
6596     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6597   }
6598 
6599   return BlockMaskCache[BB] = BlockMask;
6600 }
6601 
6602 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I,
6603                                                            VFRange &Range) {
6604   const InterleaveGroup *IG = CM.getInterleavedAccessGroup(I);
6605   if (!IG)
6606     return nullptr;
6607 
  // Now check if IG is relevant for VFs in the given range.
6609   auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
6610     return [=](unsigned VF) -> bool {
6611       return (VF >= 2 && // Query is illegal for VF == 1
6612               CM.getWideningDecision(I, VF) ==
6613                   LoopVectorizationCostModel::CM_Interleave);
6614     };
6615   };
6616   if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range))
6617     return nullptr;
6618 
  // I is a member of an InterleaveGroup for VFs in the (possibly trimmed)
  // range. If it's the primary member of the IG, construct a
  // VPInterleaveRecipe. Otherwise, it's an adjunct member of the IG; do not
  // construct any Recipe for it.
6622   assert(I == IG->getInsertPos() &&
6623          "Generating a recipe for an adjunct member of an interleave group");
6624 
6625   return new VPInterleaveRecipe(IG);
6626 }
6627 
6628 VPWidenMemoryInstructionRecipe *
6629 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6630                                   VPlanPtr &Plan) {
6631   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
6632     return nullptr;
6633 
6634   auto willWiden = [&](unsigned VF) -> bool {
6635     if (VF == 1)
6636       return false;
6637     if (CM.isScalarAfterVectorization(I, VF) ||
6638         CM.isProfitableToScalarize(I, VF))
6639       return false;
6640     LoopVectorizationCostModel::InstWidening Decision =
6641         CM.getWideningDecision(I, VF);
6642     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6643            "CM decision should be taken at this point.");
6644     assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
6645            "Interleave memory opportunity should be caught earlier.");
6646     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6647   };
6648 
6649   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6650     return nullptr;
6651 
6652   VPValue *Mask = nullptr;
6653   if (Legal->isMaskRequired(I))
6654     Mask = createBlockInMask(I->getParent(), Plan);
6655 
6656   return new VPWidenMemoryInstructionRecipe(*I, Mask);
6657 }
6658 
6659 VPWidenIntOrFpInductionRecipe *
6660 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) {
6661   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
6662     // Check if this is an integer or fp induction. If so, build the recipe that
6663     // produces its scalar and vector values.
6664     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
6665     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6666         II.getKind() == InductionDescriptor::IK_FpInduction)
6667       return new VPWidenIntOrFpInductionRecipe(Phi);
6668 
6669     return nullptr;
6670   }
6671 
6672   // Optimize the special case where the source is a constant integer
6673   // induction variable. Notice that we can only optimize the 'trunc' case
6674   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6675   // (c) other casts depend on pointer size.
6676 
6677   // Determine whether \p K is a truncation based on an induction variable that
6678   // can be optimized.
6679   auto isOptimizableIVTruncate =
6680       [&](Instruction *K) -> std::function<bool(unsigned)> {
6681     return
6682         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6683   };
6684 
6685   if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange(
6686                                isOptimizableIVTruncate(I), Range))
6687     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6688                                              cast<TruncInst>(I));
6689   return nullptr;
6690 }
6691 
6692 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) {
6693   PHINode *Phi = dyn_cast<PHINode>(I);
6694   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
6695     return nullptr;
6696 
6697   // We know that all PHIs in non-header blocks are converted into selects, so
6698   // we don't have to worry about the insertion order and we can just use the
6699   // builder. At this point we generate the predication tree. There may be
6700   // duplications since this is a simple recursive scan, but future
6701   // optimizations will clean it up.
6702 
6703   SmallVector<VPValue *, 2> Masks;
6704   unsigned NumIncoming = Phi->getNumIncomingValues();
6705   for (unsigned In = 0; In < NumIncoming; In++) {
6706     VPValue *EdgeMask =
6707       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6708     assert((EdgeMask || NumIncoming == 1) &&
6709            "Multiple predecessors with one having a full mask");
6710     if (EdgeMask)
6711       Masks.push_back(EdgeMask);
6712   }
6713   return new VPBlendRecipe(Phi, Masks);
6714 }
6715 
6716 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
6717                                  VFRange &Range) {
6718   if (CM.isScalarWithPredication(I))
6719     return false;
6720 
6721   auto IsVectorizableOpcode = [](unsigned Opcode) {
6722     switch (Opcode) {
6723     case Instruction::Add:
6724     case Instruction::And:
6725     case Instruction::AShr:
6726     case Instruction::BitCast:
6727     case Instruction::Br:
6728     case Instruction::Call:
6729     case Instruction::FAdd:
6730     case Instruction::FCmp:
6731     case Instruction::FDiv:
6732     case Instruction::FMul:
6733     case Instruction::FPExt:
6734     case Instruction::FPToSI:
6735     case Instruction::FPToUI:
6736     case Instruction::FPTrunc:
6737     case Instruction::FRem:
6738     case Instruction::FSub:
6739     case Instruction::GetElementPtr:
6740     case Instruction::ICmp:
6741     case Instruction::IntToPtr:
6742     case Instruction::Load:
6743     case Instruction::LShr:
6744     case Instruction::Mul:
6745     case Instruction::Or:
6746     case Instruction::PHI:
6747     case Instruction::PtrToInt:
6748     case Instruction::SDiv:
6749     case Instruction::Select:
6750     case Instruction::SExt:
6751     case Instruction::Shl:
6752     case Instruction::SIToFP:
6753     case Instruction::SRem:
6754     case Instruction::Store:
6755     case Instruction::Sub:
6756     case Instruction::Trunc:
6757     case Instruction::UDiv:
6758     case Instruction::UIToFP:
6759     case Instruction::URem:
6760     case Instruction::Xor:
6761     case Instruction::ZExt:
6762       return true;
6763     }
6764     return false;
6765   };
6766 
6767   if (!IsVectorizableOpcode(I->getOpcode()))
6768     return false;
6769 
6770   if (CallInst *CI = dyn_cast<CallInst>(I)) {
6771     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6772     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6773                ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6774       return false;
6775   }
6776 
6777   auto willWiden = [&](unsigned VF) -> bool {
6778     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
6779                              CM.isProfitableToScalarize(I, VF)))
6780       return false;
6781     if (CallInst *CI = dyn_cast<CallInst>(I)) {
6782       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      // The following case may be scalarized depending on the VF.
      // The flag shows whether we use an intrinsic or an ordinary call for
      // the vectorized version of the instruction. Is it beneficial to
      // perform the intrinsic call compared to a library call?
6787       bool NeedToScalarize;
6788       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
6789       bool UseVectorIntrinsic =
6790           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
6791       return UseVectorIntrinsic || !NeedToScalarize;
6792     }
6793     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
6794       assert(CM.getWideningDecision(I, VF) ==
6795                  LoopVectorizationCostModel::CM_Scalarize &&
             "Memory widening decisions should have been taken by now");
6797       return false;
6798     }
6799     return true;
6800   };
6801 
6802   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6803     return false;
6804 
6805   // Success: widen this instruction. We optimize the common case where
6806   // consecutive instructions can be represented by a single recipe.
6807   if (!VPBB->empty()) {
6808     VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
6809     if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
6810       return true;
6811   }
6812 
6813   VPBB->appendRecipe(new VPWidenRecipe(I));
6814   return true;
6815 }
6816 
6817 VPBasicBlock *VPRecipeBuilder::handleReplication(
6818     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
6819     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
6820     VPlanPtr &Plan) {
6821   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
6822       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
6823       Range);
6824 
6825   bool IsPredicated = CM.isScalarWithPredication(I);
6826   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
6827 
  // Check if I uses a predicated instruction. If so, I will use the
  // predicated instruction's scalar value; avoid hoisting the insert-element
  // which packs the scalar value into a vector value, as that should happen
  // only if all users use the vector value.
6831   for (auto &Op : I->operands())
6832     if (auto *PredInst = dyn_cast<Instruction>(Op))
6833       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
6834         PredInst2Recipe[PredInst]->setAlsoPack(false);
6835 
6836   // Finalize the recipe for Instr, first if it is not predicated.
6837   if (!IsPredicated) {
6838     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6839     VPBB->appendRecipe(Recipe);
6840     return VPBB;
6841   }
6842   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6843   assert(VPBB->getSuccessors().empty() &&
6844          "VPBB has successors when handling predicated replication.");
6845   // Record predicated instructions for above packing optimizations.
6846   PredInst2Recipe[I] = Recipe;
6847   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
6848   VPBlockUtils::insertBlockAfter(Region, VPBB);
6849   auto *RegSucc = new VPBasicBlock();
6850   VPBlockUtils::insertBlockAfter(RegSucc, Region);
6851   return RegSucc;
6852 }
6853 
6854 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
6855                                                       VPRecipeBase *PredRecipe,
6856                                                       VPlanPtr &Plan) {
6857   // Instructions marked for predication are replicated and placed under an
6858   // if-then construct to prevent side-effects.
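  // The region has a triangular shape:
  //
  //       pred.<opcode>.entry      (branch on the block mask)
  //         /              \
  //   pred.<opcode>.if      |      (the replicated instruction)
  //         \              /
  //       pred.<opcode>.continue   (phi for the predicated value, if any)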
6859 
6860   // Generate recipes to compute the block mask for this region.
6861   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
6862 
6863   // Build the triangular if-then region.
6864   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
6865   assert(Instr->getParent() && "Predicated instruction not in any basic block");
6866   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
6867   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
6868   auto *PHIRecipe =
6869       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
6870   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
6871   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
6872   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
6873 
6874   // Note: first set Entry as region entry and then connect successors starting
6875   // from it in order, to propagate the "parent" of each VPBasicBlock.
6876   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
6877   VPBlockUtils::connectBlocks(Pred, Exit);
6878 
6879   return Region;
6880 }
6881 
6882 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range,
6883                                         VPlanPtr &Plan, VPBasicBlock *VPBB) {
6884   VPRecipeBase *Recipe = nullptr;
6885   // Check if Instr should belong to an interleave memory recipe, or already
6886   // does. In the latter case Instr is irrelevant.
6887   if ((Recipe = tryToInterleaveMemory(Instr, Range))) {
6888     VPBB->appendRecipe(Recipe);
6889     return true;
6890   }
6891 
6892   // Check if Instr is a memory operation that should be widened.
6893   if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
6894     VPBB->appendRecipe(Recipe);
6895     return true;
6896   }
6897 
6898   // Check if Instr should form some PHI recipe.
6899   if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
6900     VPBB->appendRecipe(Recipe);
6901     return true;
6902   }
6903   if ((Recipe = tryToBlend(Instr, Plan))) {
6904     VPBB->appendRecipe(Recipe);
6905     return true;
6906   }
6907   if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
6908     VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
6909     return true;
6910   }
6911 
6912   // Check if Instr is to be widened by a general VPWidenRecipe, after
6913   // having first checked for specific widening recipes that deal with
6914   // Interleave Groups, Inductions and Phi nodes.
6915   if (tryToWiden(Instr, VPBB, Range))
6916     return true;
6917 
6918   return false;
6919 }
6920 
6921 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
6922                                                         unsigned MaxVF) {
6923   assert(OrigLoop->empty() && "Inner loop expected.");
6924 
6925   // Collect conditions feeding internal conditional branches; they need to be
6926   // represented in VPlan for it to model masking.
6927   SmallPtrSet<Value *, 1> NeedDef;
6928 
6929   auto *Latch = OrigLoop->getLoopLatch();
6930   for (BasicBlock *BB : OrigLoop->blocks()) {
6931     if (BB == Latch)
6932       continue;
6933     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
6934     if (Branch && Branch->isConditional())
6935       NeedDef.insert(Branch->getCondition());
6936   }
6937 
6938   // Collect instructions from the original loop that will become trivially dead
6939   // in the vectorized loop. We don't need to vectorize these instructions. For
6940   // example, original induction update instructions can become dead because we
6941   // separately emit induction "steps" when generating code for the new loop.
6942   // Similarly, we create a new latch condition when setting up the structure
6943   // of the new loop, so the old one can become dead.
6944   SmallPtrSet<Instruction *, 4> DeadInstructions;
6945   collectTriviallyDeadInstructions(DeadInstructions);
6946 
6947   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6948     VFRange SubRange = {VF, MaxVF + 1};
6949     VPlans.push_back(
6950         buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions));
6951     VF = SubRange.End;
6952   }
6953 }
6954 
6955 LoopVectorizationPlanner::VPlanPtr
6956 LoopVectorizationPlanner::buildVPlanWithVPRecipes(
6957     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
6958     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
  // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of a vector value.
6962   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
6963 
6964   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
6965   DenseMap<Instruction *, Instruction *> SinkAfterInverse;
6966 
6967   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
6968   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
6969   auto Plan = llvm::make_unique<VPlan>(VPBB);
6970 
6971   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder);
6972   // Represent values that will have defs inside VPlan.
6973   for (Value *V : NeedDef)
6974     Plan->addVPValue(V);
6975 
6976   // Scan the body of the loop in a topological order to visit each basic block
6977   // after having visited its predecessor basic blocks.
6978   LoopBlocksDFS DFS(OrigLoop);
6979   DFS.perform(LI);
6980 
6981   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and will fill a new VPBasicBlock.
6984     unsigned VPBBsForBB = 0;
6985     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
6986     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
6987     VPBB = FirstVPBBForBB;
6988     Builder.setInsertPoint(VPBB);
6989 
6990     std::vector<Instruction *> Ingredients;
6991 
6992     // Organize the ingredients to vectorize from current basic block in the
6993     // right order.
6994     for (Instruction &I : BB->instructionsWithoutDebug()) {
6995       Instruction *Instr = &I;
6996 
6997       // First filter out irrelevant instructions, to ensure no recipes are
6998       // built for them.
6999       if (isa<BranchInst>(Instr) ||
7000           DeadInstructions.find(Instr) != DeadInstructions.end())
7001         continue;
7002 
7003       // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct
7004       // member of the IG, do not construct any Recipe for it.
7005       const InterleaveGroup *IG = CM.getInterleavedAccessGroup(Instr);
7006       if (IG && Instr != IG->getInsertPos() &&
7007           Range.Start >= 2 && // Query is illegal for VF == 1
7008           CM.getWideningDecision(Instr, Range.Start) ==
7009               LoopVectorizationCostModel::CM_Interleave) {
7010         auto SinkCandidate = SinkAfterInverse.find(Instr);
7011         if (SinkCandidate != SinkAfterInverse.end())
7012           Ingredients.push_back(SinkCandidate->second);
7013         continue;
7014       }
7015 
7016       // Move instructions to handle first-order recurrences, step 1: avoid
7017       // handling this instruction until after we've handled the instruction it
7018       // should follow.
7019       auto SAIt = SinkAfter.find(Instr);
7020       if (SAIt != SinkAfter.end()) {
7021         LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after"
7022                           << *SAIt->second
7023                           << " to vectorize a 1st order recurrence.\n");
7024         SinkAfterInverse[SAIt->second] = Instr;
7025         continue;
7026       }
7027 
7028       Ingredients.push_back(Instr);
7029 
7030       // Move instructions to handle first-order recurrences, step 2: push the
7031       // instruction to be sunk at its insertion point.
7032       auto SAInvIt = SinkAfterInverse.find(Instr);
7033       if (SAInvIt != SinkAfterInverse.end())
7034         Ingredients.push_back(SAInvIt->second);
7035     }
7036 
7037     // Introduce each ingredient into VPlan.
7038     for (Instruction *Instr : Ingredients) {
7039       if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB))
7040         continue;
7041 
7042       // Otherwise, if all widening options failed, Instruction is to be
7043       // replicated. This may create a successor for VPBB.
7044       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7045           Instr, Range, VPBB, PredInst2Recipe, Plan);
7046       if (NextVPBB != VPBB) {
7047         VPBB = NextVPBB;
7048         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7049                                     : "");
7050       }
7051     }
7052   }
7053 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
7057   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7058   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7059   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7060   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7061   delete PreEntry;
7062 
7063   std::string PlanName;
7064   raw_string_ostream RSO(PlanName);
7065   unsigned VF = Range.Start;
7066   Plan->addVF(VF);
7067   RSO << "Initial VPlan for VF={" << VF;
7068   for (VF *= 2; VF < Range.End; VF *= 2) {
7069     Plan->addVF(VF);
7070     RSO << "," << VF;
7071   }
7072   RSO << "},UF>=1";
7073   RSO.flush();
7074   Plan->setName(PlanName);
7075 
7076   return Plan;
7077 }
7078 
7079 LoopVectorizationPlanner::VPlanPtr
7080 LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
7085   assert(!OrigLoop->empty());
7086   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7087 
7088   // Create new empty VPlan
7089   auto Plan = llvm::make_unique<VPlan>();
7090 
7091   // Build hierarchical CFG
7092   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7093   HCFGBuilder.buildHierarchicalCFG();
7094 
7095   return Plan;
7096 }
7097 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
7102 
7103 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
7104   O << " +\n"
7105     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7106   IG->getInsertPos()->printAsOperand(O, false);
7107   O << "\\l\"";
7108   for (unsigned i = 0; i < IG->getFactor(); ++i)
7109     if (Instruction *I = IG->getMember(i))
7110       O << " +\n"
7111         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
7112 }
7113 
7114 void VPWidenRecipe::execute(VPTransformState &State) {
7115   for (auto &Instr : make_range(Begin, End))
7116     State.ILV->widenInstruction(Instr);
7117 }
7118 
7119 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7120   assert(!State.Instance && "Int or FP induction being replicated.");
7121   State.ILV->widenIntOrFpInduction(IV, Trunc);
7122 }
7123 
7124 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7125   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7126 }
7127 
7128 void VPBlendRecipe::execute(VPTransformState &State) {
7129   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7130   // We know that all PHIs in non-header blocks are converted into
7131   // selects, so we don't have to worry about the insertion order and we
7132   // can just use the builder.
7133   // At this point we generate the predication tree. There may be
7134   // duplications since this is a simple recursive scan, but future
7135   // optimizations will clean it up.
7136 
7137   unsigned NumIncoming = Phi->getNumIncomingValues();
7138 
  assert((User || NumIncoming == 1) &&
         "Multiple predecessors with one having a full mask");
7141   // Generate a sequence of selects of the form:
7142   // SELECT(Mask3, In3,
7143   //      SELECT(Mask2, In2,
7144   //                   ( ...)))
7145   InnerLoopVectorizer::VectorParts Entry(State.UF);
7146   for (unsigned In = 0; In < NumIncoming; ++In) {
7147     for (unsigned Part = 0; Part < State.UF; ++Part) {
7148       // We might have single edge PHIs (blocks) - use an identity
7149       // 'select' for the first PHI operand.
7150       Value *In0 =
7151           State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
7152       if (In == 0)
7153         Entry[Part] = In0; // Initialize with the first incoming value.
7154       else {
7155         // Select between the current value and the previous incoming edge
7156         // based on the incoming mask.
7157         Value *Cond = State.get(User->getOperand(In), Part);
7158         Entry[Part] =
7159             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7160       }
7161     }
7162   }
7163   for (unsigned Part = 0; Part < State.UF; ++Part)
7164     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7165 }
7166 
7167 void VPInterleaveRecipe::execute(VPTransformState &State) {
7168   assert(!State.Instance && "Interleave group being replicated.");
7169   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
7170 }
7171 
7172 void VPReplicateRecipe::execute(VPTransformState &State) {
7173   if (State.Instance) { // Generate a single instance.
7174     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7175     // Insert scalar instance packing it into a vector.
7176     if (AlsoPack && State.VF > 1) {
7177       // If we're constructing lane 0, initialize to start from undef.
7178       if (State.Instance->Lane == 0) {
7179         Value *Undef =
7180             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7181         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7182       }
7183       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7184     }
7185     return;
7186   }
7187 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
7191   unsigned EndLane = IsUniform ? 1 : State.VF;
7192   for (unsigned Part = 0; Part < State.UF; ++Part)
7193     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7194       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7195 }
7196 
7197 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7198   assert(State.Instance && "Branch on Mask works only on single instance.");
7199 
7200   unsigned Part = State.Instance->Part;
7201   unsigned Lane = State.Instance->Lane;
7202 
7203   Value *ConditionBit = nullptr;
7204   if (!User) // Block in mask is all-one.
7205     ConditionBit = State.Builder.getTrue();
7206   else {
7207     VPValue *BlockInMask = User->getOperand(0);
7208     ConditionBit = State.get(BlockInMask, Part);
7209     if (ConditionBit->getType()->isVectorTy())
7210       ConditionBit = State.Builder.CreateExtractElement(
7211           ConditionBit, State.Builder.getInt32(Lane));
7212   }
7213 
7214   // Replace the temporary unreachable terminator with a new conditional branch,
7215   // whose two destinations will be set later when they are created.
7216   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7217   assert(isa<UnreachableInst>(CurrentTerminator) &&
7218          "Expected to replace unreachable terminator with conditional branch.");
7219   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7220   CondBr->setSuccessor(0, nullptr);
7221   ReplaceInstWithInst(CurrentTerminator, CondBr);
7222 }
7223 
7224 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7225   assert(State.Instance && "Predicated instruction PHI works per instance.");
7226   Instruction *ScalarPredInst = cast<Instruction>(
7227       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7228   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7229   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7230   assert(PredicatingBB && "Predicated block has no single predecessor.");
7231 
7232   // By current pack/unpack logic we need to generate only a single phi node: if
7233   // a vector value for the predicated instruction exists at this point it means
7234   // the instruction has vector users only, and a phi for the vector value is
7235   // needed. In this case the recipe of the predicated instruction is marked to
7236   // also do that packing, thereby "hoisting" the insert-element sequence.
7237   // Otherwise, a phi node for the scalar value is needed.
7238   unsigned Part = State.Instance->Part;
7239   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7240     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7241     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7242     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7243     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7244     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7245     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7246   } else {
7247     Type *PredInstType = PredInst->getType();
7248     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7249     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7250     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7251     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7252   }
7253 }
7254 
7255 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7256   if (!User)
7257     return State.ILV->vectorizeMemoryInstruction(&Instr);
7258 
7259   // Last (and currently only) operand is a mask.
7260   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
7261   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
7262   for (unsigned Part = 0; Part < State.UF; ++Part)
7263     MaskValues[Part] = State.get(Mask, Part);
7264   State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
7265 }
7266 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
7271 static bool processLoopInVPlanNativePath(
7272     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7273     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7274     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7275     OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) {
7276 
7277   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7278   Function *F = L->getHeader()->getParent();
7279   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7280   LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7281                                 &Hints, IAI);
7282   // Use the planner for outer loop vectorization.
7283   // TODO: CM is not used at this point inside the planner. Turn CM into an
7284   // optional argument if we don't need it in the future.
7285   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);
7286 
7287   // Get user vectorization factor.
7288   unsigned UserVF = Hints.getWidth();
7289 
7290   // Check the function attributes to find out if this function should be
7291   // optimized for size.
7292   bool OptForSize =
7293       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7294 
7295   // Plan how to best vectorize, return the best VF and its cost.
7296   LVP.planInVPlanNativePath(OptForSize, UserVF);
7297 
  // Return false. We are currently not generating vector code in the
  // VPlan-native path.
7300   return false;
7301 }
7302 
7303 bool LoopVectorizePass::processLoop(Loop *L) {
7304   assert((EnableVPlanNativePath || L->empty()) &&
7305          "VPlan-native path is not enabled. Only process inner loops.");
7306 
7307 #ifndef NDEBUG
7308   const std::string DebugLocStr = getDebugLocString(L);
7309 #endif /* NDEBUG */
7310 
7311   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7312                     << L->getHeader()->getParent()->getName() << "\" from "
7313                     << DebugLocStr << "\n");
7314 
7315   LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
7316 
7317   LLVM_DEBUG(
7318       dbgs() << "LV: Loop hints:"
7319              << " force="
7320              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7321                      ? "disabled"
7322                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7323                             ? "enabled"
7324                             : "?"))
7325              << " width=" << Hints.getWidth()
7326              << " unroll=" << Hints.getInterleave() << "\n");
7327 
7328   // Function containing loop
7329   Function *F = L->getHeader()->getParent();
7330 
  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose; they report vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
7338 
7339   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
7340     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7341     return false;
7342   }
7343 
7344   PredicatedScalarEvolution PSE(*SE, *L);
7345 
7346   // Check if it is legal to vectorize the loop.
7347   LoopVectorizationRequirements Requirements(*ORE);
7348   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
7349                                 &Requirements, &Hints, DB, AC);
7350   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7351     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7352     emitMissedWarning(F, L, Hints, ORE);
7353     return false;
7354   }
7355 
7356   // Check the function attributes to find out if this function should be
7357   // optimized for size.
7358   bool OptForSize =
7359       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7360 
7361   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7362   // here. They may require CFG and instruction level transformations before
7363   // even evaluating whether vectorization is profitable. Since we cannot modify
7364   // the incoming IR, we need to build VPlan upfront in the vectorization
7365   // pipeline.
7366   if (!L->empty())
7367     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7368                                         ORE, Hints);
7369 
7370   assert(L->empty() && "Inner loop expected.");
  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count, then profile data, then the upper-bound
  // estimate.
7374   unsigned ExpectedTC = 0;
7375   bool HasExpectedTC = false;
7376   if (const SCEVConstant *ConstExits =
7377       dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
7378     const APInt &ExitsCount = ConstExits->getAPInt();
7379     // We are interested in small values for ExpectedTC. Skip over those that
7380     // can't fit an unsigned.
7381     if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
7382       ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
7383       HasExpectedTC = true;
7384     }
7385   }
7386   // ExpectedTC may be large because it's bound by a variable. Check
7387   // profiling information to validate we should vectorize.
7388   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
7389     auto EstimatedTC = getLoopEstimatedTripCount(L);
7390     if (EstimatedTC) {
7391       ExpectedTC = *EstimatedTC;
7392       HasExpectedTC = true;
7393     }
7394   }
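  // As a last resort, fall back on SCEV's constant upper bound for the trip
  // count, if one is known.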
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem to be correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                         " attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
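  // Interleave groups combine loads/stores that access neighboring memory
  // locations within one iteration, so they can later be lowered to wide
  // memory operations plus shuffles.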
  if (UseInterleaved)
    IAI.analyzeInterleaving();

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
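  // Pre-compute values the cost model should ignore when costing the loop
  // body, e.g. values used only by llvm.assume calls.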
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
                         "disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

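  // Record the chosen vectorization factor and interleave count in the
  // planner; executePlan() below generates code according to this choice.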
  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop (the
    // chosen VF is 1), then interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is both legal and profitable to vectorize the
    // loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when there
    // are no runtime checks on strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
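  // The decision is recorded in the loop's metadata, so later runs of the
  // vectorizer will skip this loop.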
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // All the loop nests in the function have now been processed.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
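  // Adapt LoopAccessAnalysis, which lives in the loop analysis manager, to
  // the per-loop callback interface that runImpl expects.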
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}