1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
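//
// For illustration (a hypothetical input loop, not taken from this file), a
// scalar loop such as:
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is transformed so that each iteration of the vector loop processes VF
// consecutive elements (e.g. elements i..i+3 for VF = 4) with wide SIMD
// instructions, while a scalar remainder loop handles any left-over
// iterations.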
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlanHCFGBuilder.h"
60 #include "VPlanHCFGTransforms.h"
61 #include "VPlanPredicator.h"
62 #include "llvm/ADT/APInt.h"
63 #include "llvm/ADT/ArrayRef.h"
64 #include "llvm/ADT/DenseMap.h"
65 #include "llvm/ADT/DenseMapInfo.h"
66 #include "llvm/ADT/Hashing.h"
67 #include "llvm/ADT/MapVector.h"
68 #include "llvm/ADT/None.h"
69 #include "llvm/ADT/Optional.h"
70 #include "llvm/ADT/STLExtras.h"
71 #include "llvm/ADT/SetVector.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallVector.h"
74 #include "llvm/ADT/Statistic.h"
75 #include "llvm/ADT/StringRef.h"
76 #include "llvm/ADT/Twine.h"
77 #include "llvm/ADT/iterator_range.h"
78 #include "llvm/Analysis/AssumptionCache.h"
79 #include "llvm/Analysis/BasicAliasAnalysis.h"
80 #include "llvm/Analysis/BlockFrequencyInfo.h"
81 #include "llvm/Analysis/CFG.h"
82 #include "llvm/Analysis/CodeMetrics.h"
83 #include "llvm/Analysis/DemandedBits.h"
84 #include "llvm/Analysis/GlobalsModRef.h"
85 #include "llvm/Analysis/LoopAccessAnalysis.h"
86 #include "llvm/Analysis/LoopAnalysisManager.h"
87 #include "llvm/Analysis/LoopInfo.h"
88 #include "llvm/Analysis/LoopIterator.h"
89 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
90 #include "llvm/Analysis/ScalarEvolution.h"
91 #include "llvm/Analysis/ScalarEvolutionExpander.h"
92 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
93 #include "llvm/Analysis/TargetLibraryInfo.h"
94 #include "llvm/Analysis/TargetTransformInfo.h"
95 #include "llvm/Analysis/VectorUtils.h"
96 #include "llvm/IR/Attributes.h"
97 #include "llvm/IR/BasicBlock.h"
98 #include "llvm/IR/CFG.h"
99 #include "llvm/IR/Constant.h"
100 #include "llvm/IR/Constants.h"
101 #include "llvm/IR/DataLayout.h"
102 #include "llvm/IR/DebugInfoMetadata.h"
103 #include "llvm/IR/DebugLoc.h"
104 #include "llvm/IR/DerivedTypes.h"
105 #include "llvm/IR/DiagnosticInfo.h"
106 #include "llvm/IR/Dominators.h"
107 #include "llvm/IR/Function.h"
108 #include "llvm/IR/IRBuilder.h"
109 #include "llvm/IR/InstrTypes.h"
110 #include "llvm/IR/Instruction.h"
111 #include "llvm/IR/Instructions.h"
112 #include "llvm/IR/IntrinsicInst.h"
113 #include "llvm/IR/Intrinsics.h"
114 #include "llvm/IR/LLVMContext.h"
115 #include "llvm/IR/Metadata.h"
116 #include "llvm/IR/Module.h"
117 #include "llvm/IR/Operator.h"
118 #include "llvm/IR/Type.h"
119 #include "llvm/IR/Use.h"
120 #include "llvm/IR/User.h"
121 #include "llvm/IR/Value.h"
122 #include "llvm/IR/ValueHandle.h"
123 #include "llvm/IR/Verifier.h"
124 #include "llvm/Pass.h"
125 #include "llvm/Support/Casting.h"
126 #include "llvm/Support/CommandLine.h"
127 #include "llvm/Support/Compiler.h"
128 #include "llvm/Support/Debug.h"
129 #include "llvm/Support/ErrorHandling.h"
130 #include "llvm/Support/MathExtras.h"
131 #include "llvm/Support/raw_ostream.h"
132 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
133 #include "llvm/Transforms/Utils/LoopSimplify.h"
134 #include "llvm/Transforms/Utils/LoopUtils.h"
135 #include "llvm/Transforms/Utils/LoopVersioning.h"
136 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
137 #include <algorithm>
138 #include <cassert>
139 #include <cstdint>
140 #include <cstdlib>
141 #include <functional>
142 #include <iterator>
143 #include <limits>
144 #include <memory>
145 #include <string>
146 #include <tuple>
147 #include <utility>
148 #include <vector>
149 
150 using namespace llvm;
151 
152 #define LV_NAME "loop-vectorize"
153 #define DEBUG_TYPE LV_NAME
154 
155 /// @{
156 /// Metadata attribute names
157 static const char *const LLVMLoopVectorizeFollowupAll =
158     "llvm.loop.vectorize.followup_all";
159 static const char *const LLVMLoopVectorizeFollowupVectorized =
160     "llvm.loop.vectorize.followup_vectorized";
161 static const char *const LLVMLoopVectorizeFollowupEpilogue =
162     "llvm.loop.vectorize.followup_epilogue";
163 /// @}
164 
165 STATISTIC(LoopsVectorized, "Number of loops vectorized");
166 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
167 
168 /// Loops with a known constant trip count below this number are vectorized only
169 /// if no scalar iteration overheads are incurred.
170 static cl::opt<unsigned> TinyTripCountVectorThreshold(
171     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
172     cl::desc("Loops with a constant trip count that is smaller than this "
173              "value are vectorized only if no scalar iteration overheads "
174              "are incurred."));
175 
176 static cl::opt<bool> MaximizeBandwidth(
177     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
178     cl::desc("Maximize bandwidth when selecting vectorization factor which "
179              "will be determined by the smallest type in loop."));
180 
181 static cl::opt<bool> EnableInterleavedMemAccesses(
182     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
183     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
184 
185 /// An interleave-group may need masking if it resides in a block that needs
186 /// predication, or in order to mask away gaps.
187 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
188     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses "
             "in a loop"));
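
// For illustration (a hypothetical loop, not taken from this file): in
//
//   for (i = 0; i < N; ++i) {
//     even = A[2 * i];
//     odd  = A[2 * i + 1];
//     ...
//   }
//
// the two loads form an interleave group with factor 2 and can be vectorized
// as one wide load followed by shuffles. If the group has a gap (e.g. only
// A[2 * i] is accessed) or resides in a block that needs predication, the
// unwanted lanes must be masked away, which is what the option above allows.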
190 
191 /// We don't interleave loops with a known constant trip count below this
192 /// number.
193 static const unsigned TinyTripCountInterleaveThreshold = 128;
194 
195 static cl::opt<unsigned> ForceTargetNumScalarRegs(
196     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
197     cl::desc("A flag that overrides the target's number of scalar registers."));
198 
199 static cl::opt<unsigned> ForceTargetNumVectorRegs(
200     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
201     cl::desc("A flag that overrides the target's number of vector registers."));
202 
203 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
204     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
205     cl::desc("A flag that overrides the target's max interleave factor for "
206              "scalar loops."));
207 
208 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
209     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
210     cl::desc("A flag that overrides the target's max interleave factor for "
211              "vectorized loops."));
212 
213 static cl::opt<unsigned> ForceTargetInstructionCost(
214     "force-target-instruction-cost", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's expected cost for "
216              "an instruction to a single constant value. Mostly "
217              "useful for getting consistent testing."));
218 
219 static cl::opt<unsigned> SmallLoopCost(
220     "small-loop-cost", cl::init(20), cl::Hidden,
221     cl::desc(
222         "The cost of a loop that is considered 'small' by the interleaver."));
223 
224 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
225     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
226     cl::desc("Enable the use of the block frequency analysis to access PGO "
227              "heuristics minimizing code growth in cold regions and being more "
228              "aggressive in hot regions."));
229 
230 // Runtime interleave loops for load/store throughput.
231 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
232     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
233     cl::desc(
234         "Enable runtime interleaving until load/store ports are saturated"));
235 
236 /// The number of stores in a loop that are allowed to need predication.
237 static cl::opt<unsigned> NumberOfStoresToPredicate(
238     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
239     cl::desc("Max number of stores to be predicated behind an if."));
240 
241 static cl::opt<bool> EnableIndVarRegisterHeur(
242     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
243     cl::desc("Count the induction variable only once when interleaving"));
244 
245 static cl::opt<bool> EnableCondStoresVectorization(
246     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
247     cl::desc("Enable if predication of stores during vectorization."));
248 
249 static cl::opt<unsigned> MaxNestedScalarReductionIC(
250     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
251     cl::desc("The maximum interleave count to use when interleaving a scalar "
252              "reduction in a nested loop."));
253 
254 cl::opt<bool> EnableVPlanNativePath(
255     "enable-vplan-native-path", cl::init(false), cl::Hidden,
256     cl::desc("Enable VPlan-native vectorization path with "
257              "support for outer loop vectorization."));
258 
259 // FIXME: Remove this switch once we have divergence analysis. Currently we
260 // assume divergent non-backedge branches when this switch is true.
261 cl::opt<bool> EnableVPlanPredication(
262     "enable-vplan-predication", cl::init(false), cl::Hidden,
263     cl::desc("Enable VPlan-native vectorization path predicator with "
264              "support for outer loop vectorization."));
265 
266 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
268 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
269 // verification of the H-CFGs built.
270 static cl::opt<bool> VPlanBuildStressTest(
271     "vplan-build-stress-test", cl::init(false), cl::Hidden,
272     cl::desc(
273         "Build VPlan for every supported loop nest in the function and bail "
274         "out right after the build (stress test the VPlan H-CFG construction "
275         "in the VPlan-native vectorization path)."));
276 
277 /// A helper function for converting Scalar types to vector types.
278 /// If the incoming type is void, we return void. If the VF is 1, we return
279 /// the scalar type.
280 static Type *ToVectorTy(Type *Scalar, unsigned VF) {
281   if (Scalar->isVoidTy() || VF == 1)
282     return Scalar;
283   return VectorType::get(Scalar, VF);
284 }
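
// For example (illustrative): ToVectorTy(i32, 4) returns <4 x i32>, while
// ToVectorTy(i32, 1) and ToVectorTy(void, 4) return the type unchanged.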
285 
286 /// A helper function that returns the type of loaded or stored value.
287 static Type *getMemInstValueType(Value *I) {
288   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
289          "Expected Load or Store instruction");
290   if (auto *LI = dyn_cast<LoadInst>(I))
291     return LI->getType();
292   return cast<StoreInst>(I)->getValueOperand()->getType();
293 }
294 
295 /// A helper function that returns true if the given type is irregular. The
296 /// type is irregular if its allocated size doesn't equal the store size of an
297 /// element of the corresponding vector type at the given vectorization factor.
298 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
299   // Determine if an array of VF elements of type Ty is "bitcast compatible"
300   // with a <VF x Ty> vector.
301   if (VF > 1) {
302     auto *VectorTy = VectorType::get(Ty, VF);
303     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
304   }
305 
306   // If the vectorization factor is one, we just check if an array of type Ty
307   // requires padding between elements.
308   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
309 }
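
// For example (illustrative, target-dependent): x86_fp80 typically has a
// store size of 10 bytes but an allocation size of 12 or 16 bytes, so an
// array of x86_fp80 values is not bitcast-compatible with a vector of
// x86_fp80 and the type is treated as irregular.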
310 
311 /// A helper function that returns the reciprocal of the block probability of
312 /// predicated blocks. If we return X, we are assuming the predicated block
313 /// will execute once for every X iterations of the loop header.
314 ///
315 /// TODO: We should use actual block probability here, if available. Currently,
316 ///       we always assume predicated blocks have a 50% chance of executing.
317 static unsigned getReciprocalPredBlockProb() { return 2; }
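
// For example (illustrative): under the 50% assumption above, the cost model
// can divide the scalarized cost of a predicated block by the value returned
// here (2) to approximate its average per-iteration contribution.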
318 
319 /// A helper function that adds a 'fast' flag to floating-point operations.
320 static Value *addFastMathFlag(Value *V) {
321   if (isa<FPMathOperator>(V)) {
322     FastMathFlags Flags;
323     Flags.setFast();
324     cast<Instruction>(V)->setFastMathFlags(Flags);
325   }
326   return V;
327 }
328 
329 /// A helper function that returns an integer or floating-point constant with
330 /// value C.
331 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
332   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
333                            : ConstantFP::get(Ty, C);
334 }
335 
336 namespace llvm {
337 
338 /// InnerLoopVectorizer vectorizes loops which contain only one basic
339 /// block to a specified vectorization factor (VF).
340 /// This class performs the widening of scalars into vectors, or multiple
341 /// scalars. This class also implements the following features:
342 /// * It inserts an epilogue loop for handling loops that don't have iteration
343 ///   counts that are known to be a multiple of the vectorization factor.
344 /// * It handles the code generation for reduction variables.
345 /// * Scalarization (implementation using scalars) of un-vectorizable
346 ///   instructions.
347 /// InnerLoopVectorizer does not perform any vectorization-legality
348 /// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the
/// induction and reduction variables that were found in the loop.
352 class InnerLoopVectorizer {
353 public:
354   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
355                       LoopInfo *LI, DominatorTree *DT,
356                       const TargetLibraryInfo *TLI,
357                       const TargetTransformInfo *TTI, AssumptionCache *AC,
358                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
359                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
360                       LoopVectorizationCostModel *CM)
361       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
362         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
363         Builder(PSE.getSE()->getContext()),
364         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
365   virtual ~InnerLoopVectorizer() = default;
366 
367   /// Create a new empty loop. Unlink the old loop and connect the new one.
368   /// Return the pre-header block of the new loop.
369   BasicBlock *createVectorizedLoopSkeleton();
370 
371   /// Widen a single instruction within the innermost loop.
372   void widenInstruction(Instruction &I);
373 
374   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
375   void fixVectorizedLoop();
376 
377   // Return true if any runtime check is added.
378   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
379 
380   /// A type for vectorized values in the new loop. Each value from the
381   /// original loop, when vectorized, is represented by UF vector values in the
382   /// new unrolled loop, where UF is the unroll factor.
383   using VectorParts = SmallVector<Value *, 2>;
384 
385   /// Vectorize a single PHINode in a block. This method handles the induction
386   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
387   /// arbitrary length vectors.
388   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
389 
390   /// A helper function to scalarize a single Instruction in the innermost loop.
391   /// Generates a sequence of scalar instances for each lane between \p MinLane
392   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
394   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
395                             bool IfPredicateInstr);
396 
397   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
398   /// is provided, the integer induction variable will first be truncated to
399   /// the corresponding type.
400   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
401 
402   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
403   /// vector or scalar value on-demand if one is not yet available. When
404   /// vectorizing a loop, we visit the definition of an instruction before its
405   /// uses. When visiting the definition, we either vectorize or scalarize the
406   /// instruction, creating an entry for it in the corresponding map. (In some
407   /// cases, such as induction variables, we will create both vector and scalar
408   /// entries.) Then, as we encounter uses of the definition, we derive values
409   /// for each scalar or vector use unless such a value is already available.
410   /// For example, if we scalarize a definition and one of its uses is vector,
411   /// we build the required vector on-demand with an insertelement sequence
412   /// when visiting the use. Otherwise, if the use is scalar, we can use the
413   /// existing scalar definition.
414   ///
415   /// Return a value in the new loop corresponding to \p V from the original
416   /// loop at unroll index \p Part. If the value has already been vectorized,
417   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
418   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
419   /// a new vector value on-demand by inserting the scalar values into a vector
420   /// with an insertelement sequence. If the value has been neither vectorized
421   /// nor scalarized, it must be loop invariant, so we simply broadcast the
422   /// value into a vector.
423   Value *getOrCreateVectorValue(Value *V, unsigned Part);
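
  // For example (illustrative): if a definition was scalarized into lanes
  // %d.0 .. %d.3 and a later use needs it as a vector, the lanes are packed
  // with an insertelement sequence; a loop-invariant value that is needed as
  // a vector is simply broadcast.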
424 
425   /// Return a value in the new loop corresponding to \p V from the original
426   /// loop at unroll and vector indices \p Instance. If the value has been
427   /// vectorized but not scalarized, the necessary extractelement instruction
428   /// will be generated.
429   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
430 
431   /// Construct the vector value of a scalarized value \p V one lane at a time.
432   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
433 
434   /// Try to vectorize the interleaved access group that \p Instr belongs to,
435   /// optionally masking the vector operations if \p BlockInMask is non-null.
436   void vectorizeInterleaveGroup(Instruction *Instr,
437                                 VectorParts *BlockInMask = nullptr);
438 
439   /// Vectorize Load and Store instructions, optionally masking the vector
440   /// operations if \p BlockInMask is non-null.
441   void vectorizeMemoryInstruction(Instruction *Instr,
442                                   VectorParts *BlockInMask = nullptr);
443 
444   /// Set the debug location in the builder using the debug location in
445   /// the instruction.
446   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
447 
448   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs();
450 
451 protected:
452   friend class LoopVectorizationPlanner;
453 
454   /// A small list of PHINodes.
455   using PhiVector = SmallVector<PHINode *, 4>;
456 
457   /// A type for scalarized values in the new loop. Each value from the
458   /// original loop, when scalarized, is represented by UF x VF scalar values
459   /// in the new unrolled loop, where UF is the unroll factor and VF is the
460   /// vectorization factor.
461   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
462 
463   /// Set up the values of the IVs correctly when exiting the vector loop.
464   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
465                     Value *CountRoundDown, Value *EndValue,
466                     BasicBlock *MiddleBlock);
467 
468   /// Create a new induction variable inside L.
469   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
470                                    Value *Step, Instruction *DL);
471 
472   /// Handle all cross-iteration phis in the header.
473   void fixCrossIterationPHIs();
474 
475   /// Fix a first-order recurrence. This is the second phase of vectorizing
476   /// this phi node.
477   void fixFirstOrderRecurrence(PHINode *Phi);
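
  // A first-order recurrence uses a value from the previous iteration, e.g.
  // (illustrative, not from this file):
  //
  //   for (int i = 1; i < n; ++i)
  //     b[i] = a[i] + a[i - 1];
  //
  // In the vector loop the last lane of the previous iteration's vector must
  // be carried over into the first lane of the current one.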
478 
479   /// Fix a reduction cross-iteration phi. This is the second phase of
480   /// vectorizing this phi node.
481   void fixReduction(PHINode *Phi);
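
  // A reduction accumulates values across iterations, e.g. (illustrative):
  //
  //   for (int i = 0; i < n; ++i)
  //     sum += a[i];
  //
  // The vector loop keeps VF partial sums in a vector and combines them into
  // a single scalar value after the loop.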
482 
  /// The loop exit block may have single-value PHI nodes with some incoming
  /// value. While vectorizing we only handled real values that were defined
  /// inside the loop, and we should have one value for each predecessor of
  /// its parent basic block. See PR14725.
487   void fixLCSSAPHIs();
488 
489   /// Iteratively sink the scalarized operands of a predicated instruction into
490   /// the block that was created for it.
491   void sinkScalarOperands(Instruction *PredInst);
492 
493   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
494   /// represented as.
495   void truncateToMinimalBitwidths();
496 
497   /// Insert the new loop to the loop hierarchy and pass manager
498   /// and update the analysis passes.
499   void updateAnalysis();
500 
501   /// Create a broadcast instruction. This method generates a broadcast
502   /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
505   /// element.
506   virtual Value *getBroadcastInstrs(Value *V);
507 
508   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
509   /// to each vector element of Val. The sequence starts at StartIndex.
510   /// \p Opcode is relevant for FP induction variable.
511   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
512                                Instruction::BinaryOps Opcode =
513                                Instruction::BinaryOpsEnd);
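
  // For example (illustrative): with VF = 4, Val = splat(%x), StartIdx = 0
  // and Step = 2, the result is <%x + 0, %x + 2, %x + 4, %x + 6>.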
514 
515   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
516   /// variable on which to base the steps, \p Step is the size of the step, and
517   /// \p EntryVal is the value from the original loop that maps to the steps.
518   /// Note that \p EntryVal doesn't have to be an induction variable - it
519   /// can also be a truncate instruction.
520   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
521                         const InductionDescriptor &ID);
522 
523   /// Create a vector induction phi node based on an existing scalar one. \p
524   /// EntryVal is the value from the original loop that maps to the vector phi
525   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
526   /// truncate instruction, instead of widening the original IV, we widen a
527   /// version of the IV truncated to \p EntryVal's type.
528   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
529                                        Value *Step, Instruction *EntryVal);
530 
531   /// Returns true if an instruction \p I should be scalarized instead of
532   /// vectorized for the chosen vectorization factor.
533   bool shouldScalarizeInstruction(Instruction *I) const;
534 
535   /// Returns true if we should generate a scalar version of \p IV.
536   bool needsScalarInduction(Instruction *IV) const;
537 
538   /// If there is a cast involved in the induction variable \p ID, which should
539   /// be ignored in the vectorized loop body, this function records the
540   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
541   /// cast. We had already proved that the casted Phi is equal to the uncasted
542   /// Phi in the vectorized loop (under a runtime guard), and therefore
543   /// there is no need to vectorize the cast - the same value can be used in the
544   /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
546   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
547   ///
548   /// \p EntryVal is the value from the original loop that maps to the vector
549   /// phi node and is used to distinguish what is the IV currently being
550   /// processed - original one (if \p EntryVal is a phi corresponding to the
551   /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
553   /// latter case \p EntryVal is a TruncInst and we must not record anything for
554   /// that IV, but it's error-prone to expect callers of this routine to care
555   /// about that, hence this explicit parameter.
556   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
557                                              const Instruction *EntryVal,
558                                              Value *VectorLoopValue,
559                                              unsigned Part,
560                                              unsigned Lane = UINT_MAX);
561 
562   /// Generate a shuffle sequence that will reverse the vector Vec.
563   virtual Value *reverseVector(Value *Vec);
564 
565   /// Returns (and creates if needed) the original loop trip count.
566   Value *getOrCreateTripCount(Loop *NewLoop);
567 
568   /// Returns (and creates if needed) the trip count of the widened loop.
569   Value *getOrCreateVectorTripCount(Loop *NewLoop);
570 
571   /// Returns a bitcasted value to the requested vector type.
572   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
573   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
574                                 const DataLayout &DL);
575 
576   /// Emit a bypass check to see if the vector trip count is zero, including if
577   /// it overflows.
578   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
579 
580   /// Emit a bypass check to see if all of the SCEV assumptions we've
581   /// had to make are correct.
582   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
583 
584   /// Emit bypass checks to check any memory assumptions we may have made.
585   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
586 
587   /// Compute the transformed value of Index at offset StartValue using step
588   /// StepValue.
589   /// For integer induction, returns StartValue + Index * StepValue.
590   /// For pointer induction, returns StartValue[Index * StepValue].
591   /// FIXME: The newly created binary instructions should contain nsw/nuw
592   /// flags, which can be found from the original scalar operations.
593   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
594                               const DataLayout &DL,
595                               const InductionDescriptor &ID) const;
596 
597   /// Add additional metadata to \p To that was not present on \p Orig.
598   ///
599   /// Currently this is used to add the noalias annotations based on the
600   /// inserted memchecks.  Use this for instructions that are *cloned* into the
601   /// vector loop.
602   void addNewMetadata(Instruction *To, const Instruction *Orig);
603 
604   /// Add metadata from one instruction to another.
605   ///
606   /// This includes both the original MDs from \p From and additional ones (\see
607   /// addNewMetadata).  Use this for *newly created* instructions in the vector
608   /// loop.
609   void addMetadata(Instruction *To, Instruction *From);
610 
611   /// Similar to the previous function but it adds the metadata to a
612   /// vector of instructions.
613   void addMetadata(ArrayRef<Value *> To, Instruction *From);
614 
615   /// The original loop.
616   Loop *OrigLoop;
617 
618   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
619   /// dynamic knowledge to simplify SCEV expressions and converts them to a
620   /// more usable form.
621   PredicatedScalarEvolution &PSE;
622 
623   /// Loop Info.
624   LoopInfo *LI;
625 
626   /// Dominator Tree.
627   DominatorTree *DT;
628 
629   /// Alias Analysis.
630   AliasAnalysis *AA;
631 
632   /// Target Library Info.
633   const TargetLibraryInfo *TLI;
634 
635   /// Target Transform Info.
636   const TargetTransformInfo *TTI;
637 
638   /// Assumption Cache.
639   AssumptionCache *AC;
640 
641   /// Interface to emit optimization remarks.
642   OptimizationRemarkEmitter *ORE;
643 
644   /// LoopVersioning.  It's only set up (non-null) if memchecks were
645   /// used.
646   ///
647   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
649   std::unique_ptr<LoopVersioning> LVer;
650 
651   /// The vectorization SIMD factor to use. Each vector will have this many
652   /// vector elements.
653   unsigned VF;
654 
655   /// The vectorization unroll factor to use. Each scalar is vectorized to this
656   /// many different vector instructions.
657   unsigned UF;
658 
659   /// The builder that we use
660   IRBuilder<> Builder;
661 
662   // --- Vectorization state ---
663 
664   /// The vector-loop preheader.
665   BasicBlock *LoopVectorPreHeader;
666 
667   /// The scalar-loop preheader.
668   BasicBlock *LoopScalarPreHeader;
669 
670   /// Middle Block between the vector and the scalar.
671   BasicBlock *LoopMiddleBlock;
672 
673   /// The ExitBlock of the scalar loop.
674   BasicBlock *LoopExitBlock;
675 
676   /// The vector loop body.
677   BasicBlock *LoopVectorBody;
678 
679   /// The scalar loop body.
680   BasicBlock *LoopScalarBody;
681 
682   /// A list of all bypass blocks. The first block is the entry of the loop.
683   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
684 
685   /// The new Induction variable which was added to the new block.
686   PHINode *Induction = nullptr;
687 
688   /// The induction variable of the old basic block.
689   PHINode *OldInduction = nullptr;
690 
691   /// Maps values from the original loop to their corresponding values in the
692   /// vectorized loop. A key value can map to either vector values, scalar
693   /// values or both kinds of values, depending on whether the key was
694   /// vectorized and scalarized.
695   VectorizerValueMap VectorLoopValueMap;
696 
697   /// Store instructions that were predicated.
698   SmallVector<Instruction *, 4> PredicatedInstructions;
699 
700   /// Trip count of the original loop.
701   Value *TripCount = nullptr;
702 
703   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
704   Value *VectorTripCount = nullptr;
705 
706   /// The legality analysis.
707   LoopVectorizationLegality *Legal;
708 
  /// The profitability analysis.
710   LoopVectorizationCostModel *Cost;
711 
712   // Record whether runtime checks are added.
713   bool AddedSafetyChecks = false;
714 
715   // Holds the end values for each induction variable. We save the end values
716   // so we can later fix-up the external users of the induction variables.
717   DenseMap<PHINode *, Value *> IVEndValues;
718 
719   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
720   // fixed up at the end of vector code generation.
721   SmallVector<PHINode *, 8> OrigPHIsToFix;
722 };
723 
724 class InnerLoopUnroller : public InnerLoopVectorizer {
725 public:
726   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
727                     LoopInfo *LI, DominatorTree *DT,
728                     const TargetLibraryInfo *TLI,
729                     const TargetTransformInfo *TTI, AssumptionCache *AC,
730                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
731                     LoopVectorizationLegality *LVL,
732                     LoopVectorizationCostModel *CM)
733       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
734                             UnrollFactor, LVL, CM) {}
735 
736 private:
737   Value *getBroadcastInstrs(Value *V) override;
738   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
739                        Instruction::BinaryOps Opcode =
740                        Instruction::BinaryOpsEnd) override;
741   Value *reverseVector(Value *Vec) override;
742 };
743 
744 } // end namespace llvm
745 
/// Look for a meaningful debug location on the instruction or its
747 /// operands.
748 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
749   if (!I)
750     return I;
751 
752   DebugLoc Empty;
753   if (I->getDebugLoc() != Empty)
754     return I;
755 
756   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
757     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
758       if (OpInst->getDebugLoc() != Empty)
759         return OpInst;
760   }
761 
762   return I;
763 }
764 
765 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
766   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
767     const DILocation *DIL = Inst->getDebugLoc();
768     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
769         !isa<DbgInfoIntrinsic>(Inst)) {
770       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
771       if (NewDIL)
772         B.SetCurrentDebugLocation(NewDIL.getValue());
773       else
774         LLVM_DEBUG(dbgs()
775                    << "Failed to create new discriminator: "
776                    << DIL->getFilename() << " Line: " << DIL->getLine());
777     }
778     else
779       B.SetCurrentDebugLocation(DIL);
780   } else
781     B.SetCurrentDebugLocation(DebugLoc());
782 }
783 
784 #ifndef NDEBUG
785 /// \return string containing a file name and a line # for the given loop.
786 static std::string getDebugLocString(const Loop *L) {
787   std::string Result;
788   if (L) {
789     raw_string_ostream OS(Result);
790     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
791       LoopDbgLoc.print(OS);
792     else
793       // Just print the module name.
794       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
795     OS.flush();
796   }
797   return Result;
798 }
799 #endif
800 
801 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
802                                          const Instruction *Orig) {
803   // If the loop was versioned with memchecks, add the corresponding no-alias
804   // metadata.
805   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
806     LVer->annotateInstWithNoAlias(To, Orig);
807 }
808 
809 void InnerLoopVectorizer::addMetadata(Instruction *To,
810                                       Instruction *From) {
811   propagateMetadata(To, From);
812   addNewMetadata(To, From);
813 }
814 
815 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
816                                       Instruction *From) {
817   for (Value *V : To) {
818     if (Instruction *I = dyn_cast<Instruction>(V))
819       addMetadata(I, From);
820   }
821 }
822 
823 namespace llvm {
824 
825 /// LoopVectorizationCostModel - estimates the expected speedups due to
826 /// vectorization.
827 /// In many cases vectorization is not profitable. This can happen because of
828 /// a number of reasons. In this class we mainly attempt to predict the
829 /// expected speedup/slowdowns due to the supported instruction set. We use the
830 /// TargetTransformInfo to query the different backends for the cost of
831 /// different operations.
832 class LoopVectorizationCostModel {
833 public:
834   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
835                              LoopInfo *LI, LoopVectorizationLegality *Legal,
836                              const TargetTransformInfo &TTI,
837                              const TargetLibraryInfo *TLI, DemandedBits *DB,
838                              AssumptionCache *AC,
839                              OptimizationRemarkEmitter *ORE, const Function *F,
840                              const LoopVectorizeHints *Hints,
841                              InterleavedAccessInfo &IAI)
842       : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}
844 
845   /// \return An upper bound for the vectorization factor, or None if
846   /// vectorization should be avoided up front.
847   Optional<unsigned> computeMaxVF(bool OptForSize);
848 
849   /// \return The most profitable vectorization factor and the cost of that VF.
850   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
851   /// then this vectorization factor will be selected if vectorization is
852   /// possible.
853   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
854 
855   /// Setup cost-based decisions for user vectorization factor.
856   void selectUserVectorizationFactor(unsigned UserVF) {
857     collectUniformsAndScalars(UserVF);
858     collectInstsToScalarize(UserVF);
859   }
860 
861   /// \return The size (in bits) of the smallest and widest types in the code
862   /// that needs to be vectorized. We ignore values that remain scalar such as
863   /// 64 bit loop indices.
864   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
865 
866   /// \return The desired interleave count.
867   /// If interleave count has been specified by metadata it will be returned.
868   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
869   /// are the selected vectorization factor and the cost of the selected VF.
870   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
871                                  unsigned LoopCost);
872 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function makes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. This decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision in order to avoid redundant calculations.
880   void setCostBasedWideningDecision(unsigned VF);
881 
882   /// A struct that represents some properties of the register usage
883   /// of a loop.
884   struct RegisterUsage {
885     /// Holds the number of loop invariant values that are used in the loop.
886     unsigned LoopInvariantRegs;
887 
888     /// Holds the maximum number of concurrent live intervals in the loop.
889     unsigned MaxLocalUsers;
890   };
891 
  /// \return Returns information about the register usage of the loop for the
893   /// given vectorization factors.
894   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
895 
896   /// Collect values we want to ignore in the cost model.
897   void collectValuesToIgnore();
898 
899   /// \returns The smallest bitwidth each instruction can be represented with.
900   /// The vector equivalents of these instructions should be truncated to this
901   /// type.
902   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
903     return MinBWs;
904   }
905 
906   /// \returns True if it is more profitable to scalarize instruction \p I for
907   /// vectorization factor \p VF.
908   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
909     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
910 
911     // Cost model is not run in the VPlan-native path - return conservative
912     // result until this changes.
913     if (EnableVPlanNativePath)
914       return false;
915 
916     auto Scalars = InstsToScalarize.find(VF);
917     assert(Scalars != InstsToScalarize.end() &&
918            "VF not yet analyzed for scalarization profitability");
919     return Scalars->second.find(I) != Scalars->second.end();
920   }
921 
922   /// Returns true if \p I is known to be uniform after vectorization.
923   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
924     if (VF == 1)
925       return true;
926 
927     // Cost model is not run in the VPlan-native path - return conservative
928     // result until this changes.
929     if (EnableVPlanNativePath)
930       return false;
931 
932     auto UniformsPerVF = Uniforms.find(VF);
933     assert(UniformsPerVF != Uniforms.end() &&
934            "VF not yet analyzed for uniformity");
935     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
936   }
937 
938   /// Returns true if \p I is known to be scalar after vectorization.
939   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
940     if (VF == 1)
941       return true;
942 
943     // Cost model is not run in the VPlan-native path - return conservative
944     // result until this changes.
945     if (EnableVPlanNativePath)
946       return false;
947 
948     auto ScalarsPerVF = Scalars.find(VF);
949     assert(ScalarsPerVF != Scalars.end() &&
950            "Scalar values are not calculated for VF");
951     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
952   }
953 
954   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
955   /// for vectorization factor \p VF.
956   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
957     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
958            !isProfitableToScalarize(I, VF) &&
959            !isScalarAfterVectorization(I, VF);
960   }
961 
962   /// Decision that was taken during cost calculation for memory instruction.
963   enum InstWidening {
964     CM_Unknown,
965     CM_Widen,         // For consecutive accesses with stride +1.
966     CM_Widen_Reverse, // For consecutive accesses with stride -1.
967     CM_Interleave,
968     CM_GatherScatter,
969     CM_Scalarize
970   };
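
  // For illustration (hypothetical accesses, not from this file): a load of
  // A[i] is typically CM_Widen (stride +1), a load of A[N - i] is
  // CM_Widen_Reverse (stride -1), members of a group such as A[2 * i] and
  // A[2 * i + 1] are CM_Interleave, and an indirect access like A[B[i]]
  // becomes CM_GatherScatter or CM_Scalarize depending on target support and
  // cost.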
971 
972   /// Save vectorization decision \p W and \p Cost taken by the cost model for
973   /// instruction \p I and vector width \p VF.
974   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
975                            unsigned Cost) {
976     assert(VF >= 2 && "Expected VF >=2");
977     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
978   }
979 
980   /// Save vectorization decision \p W and \p Cost taken by the cost model for
981   /// interleaving group \p Grp and vector width \p VF.
982   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
983                            InstWidening W, unsigned Cost) {
984     assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
986     /// But the cost will be assigned to one instruction only.
987     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
988       if (auto *I = Grp->getMember(i)) {
989         if (Grp->getInsertPos() == I)
990           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
991         else
992           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
993       }
994     }
995   }
996 
997   /// Return the cost model decision for the given instruction \p I and vector
998   /// width \p VF. Return CM_Unknown if this instruction did not pass
999   /// through the cost modeling.
1000   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1001     assert(VF >= 2 && "Expected VF >=2");
1002 
1003     // Cost model is not run in the VPlan-native path - return conservative
1004     // result until this changes.
1005     if (EnableVPlanNativePath)
1006       return CM_GatherScatter;
1007 
1008     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1009     auto Itr = WideningDecisions.find(InstOnVF);
1010     if (Itr == WideningDecisions.end())
1011       return CM_Unknown;
1012     return Itr->second.first;
1013   }
1014 
1015   /// Return the vectorization cost for the given instruction \p I and vector
1016   /// width \p VF.
1017   unsigned getWideningCost(Instruction *I, unsigned VF) {
1018     assert(VF >= 2 && "Expected VF >=2");
1019     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1020     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1021            "The cost is not calculated");
1022     return WideningDecisions[InstOnVF].second;
1023   }
1024 
1025   /// Return True if instruction \p I is an optimizable truncate whose operand
1026   /// is an induction variable. Such a truncate will be removed by adding a new
1027   /// induction variable with the destination type.
1028   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1029     // If the instruction is not a truncate, return false.
1030     auto *Trunc = dyn_cast<TruncInst>(I);
1031     if (!Trunc)
1032       return false;
1033 
1034     // Get the source and destination types of the truncate.
1035     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1036     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1037 
1038     // If the truncate is free for the given types, return false. Replacing a
1039     // free truncate with an induction variable would add an induction variable
1040     // update instruction to each iteration of the loop. We exclude from this
1041     // check the primary induction variable since it will need an update
1042     // instruction regardless.
1043     Value *Op = Trunc->getOperand(0);
1044     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1045       return false;
1046 
1047     // If the truncated value is not an induction variable, return false.
1048     return Legal->isInductionPhi(Op);
1049   }
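
  // For example (illustrative): given an i64 induction variable %iv and a use
  // "trunc i64 %iv to i32", the truncate can be removed by creating a new i32
  // induction variable, unless the truncate is free for the target and %iv is
  // not the primary induction variable.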
1050 
1051   /// Collects the instructions to scalarize for each predicated instruction in
1052   /// the loop.
1053   void collectInstsToScalarize(unsigned VF);
1054 
1055   /// Collect Uniform and Scalar values for the given \p VF.
1056   /// The sets depend on CM decision for Load/Store instructions
1057   /// that may be vectorized as interleave, gather-scatter or scalarized.
1058   void collectUniformsAndScalars(unsigned VF) {
1059     // Do the analysis once.
1060     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1061       return;
1062     setCostBasedWideningDecision(VF);
1063     collectLoopUniforms(VF);
1064     collectLoopScalars(VF);
1065   }
1066 
1067   /// Returns true if the target machine supports masked store operation
1068   /// for the given \p DataType and kind of access to \p Ptr.
1069   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1070     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
1071   }
1072 
1073   /// Returns true if the target machine supports masked load operation
1074   /// for the given \p DataType and kind of access to \p Ptr.
1075   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1076     return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
1077   }
1078 
1079   /// Returns true if the target machine supports masked scatter operation
1080   /// for the given \p DataType.
1081   bool isLegalMaskedScatter(Type *DataType) {
1082     return TTI.isLegalMaskedScatter(DataType);
1083   }
1084 
1085   /// Returns true if the target machine supports masked gather operation
1086   /// for the given \p DataType.
1087   bool isLegalMaskedGather(Type *DataType) {
1088     return TTI.isLegalMaskedGather(DataType);
1089   }
1090 
1091   /// Returns true if the target machine can represent \p V as a masked gather
1092   /// or scatter operation.
1093   bool isLegalGatherOrScatter(Value *V) {
1094     bool LI = isa<LoadInst>(V);
1095     bool SI = isa<StoreInst>(V);
1096     if (!LI && !SI)
1097       return false;
1098     auto *Ty = getMemInstValueType(V);
1099     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1100   }
1101 
1102   /// Returns true if \p I is an instruction that will be scalarized with
1103   /// predication. Such instructions include conditional stores and
1104   /// instructions that may divide by zero.
1105   /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1107   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1108 
1109   // Returns true if \p I is an instruction that will be predicated either
1110   // through scalar predication or masked load/store or masked gather/scatter.
1111   // Superset of instructions that return true for isScalarWithPredication.
1112   bool isPredicatedInst(Instruction *I) {
1113     if (!blockNeedsPredication(I->getParent()))
1114       return false;
1115     // Loads and stores that need some form of masked operation are predicated
1116     // instructions.
1117     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1118       return Legal->isMaskRequired(I);
1119     return isScalarWithPredication(I);
1120   }
1121 
1122   /// Returns true if \p I is a memory instruction with consecutive memory
1123   /// access that can be widened.
1124   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1125 
1126   /// Returns true if \p I is a memory instruction in an interleaved-group
1127   /// of memory accesses that can be vectorized with wide vector loads/stores
1128   /// and shuffles.
1129   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1130 
1131   /// Check if \p Instr belongs to any interleaved access group.
1132   bool isAccessInterleaved(Instruction *Instr) {
1133     return InterleaveInfo.isInterleaved(Instr);
1134   }
1135 
1136   /// Get the interleaved access group that \p Instr belongs to.
1137   const InterleaveGroup<Instruction> *
1138   getInterleavedAccessGroup(Instruction *Instr) {
1139     return InterleaveInfo.getInterleaveGroup(Instr);
1140   }
1141 
1142   /// Returns true if an interleaved group requires a scalar iteration
1143   /// to handle accesses with gaps, and there is nothing preventing us from
1144   /// creating a scalar epilogue.
1145   bool requiresScalarEpilogue() const {
1146     return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue();
1147   }
1148 
1149   /// Returns true if a scalar epilogue is not allowed due to optsize.
1150   bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; }
1151 
1152   /// Returns true if all loop blocks should be masked to fold tail loop.
1153   bool foldTailByMasking() const { return FoldTailByMasking; }
1154 
1155   bool blockNeedsPredication(BasicBlock *BB) {
1156     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1157   }
1158 
1159 private:
1160   unsigned NumPredStores = 0;
1161 
1162   /// \return An upper bound for the vectorization factor, larger than zero.
1163   /// One is returned if vectorization should best be avoided due to cost.
1164   unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);
1165 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1173   using VectorizationCostTy = std::pair<unsigned, bool>;
1174 
1175   /// Returns the expected execution cost. The unit of the cost does
1176   /// not matter because we use the 'cost' units to compare different
1177   /// vector widths. The cost that is returned is *not* normalized by
1178   /// the factor width.
1179   VectorizationCostTy expectedCost(unsigned VF);
1180 
1181   /// Returns the execution time cost of an instruction for a given vector
1182   /// width. Vector width of one means scalar.
1183   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1184 
1185   /// The cost-computation logic from getInstructionCost which provides
1186   /// the vector type as an output parameter.
1187   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1188 
1189   /// Calculate vectorization cost of memory instruction \p I.
1190   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1191 
1192   /// The cost computation for scalarized memory instruction.
1193   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1194 
1195   /// The cost computation for interleaving group of memory instructions.
1196   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1197 
1198   /// The cost computation for Gather/Scatter instruction.
1199   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1200 
1201   /// The cost computation for widening instruction \p I with consecutive
1202   /// memory access.
1203   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1204 
1205   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1206   /// Load: scalar load + broadcast.
1207   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1208   /// element)
1209   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1210 
  /// Returns whether the instruction is a load or store and will be emitted
1212   /// as a vector operation.
1213   bool isConsecutiveLoadOrStore(Instruction *I);
1214 
1215   /// Returns true if an artificially high cost for emulated masked memrefs
1216   /// should be used.
1217   bool useEmulatedMaskMemRefHack(Instruction *I);
1218 
1219   /// Create an analysis remark that explains why vectorization failed
1220   ///
1221   /// \p RemarkName is the identifier for the remark.  \return the remark object
1222   /// that can be streamed to.
1223   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
1224     return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1225                                   RemarkName, TheLoop);
1226   }
1227 
1228   /// Map of scalar integer values to the smallest bitwidth they can be legally
1229   /// represented as. The vector equivalents of these values should be truncated
1230   /// to this type.
1231   MapVector<Instruction *, uint64_t> MinBWs;
1232 
1233   /// A type representing the costs for instructions if they were to be
1234   /// scalarized rather than vectorized. The entries are Instruction-Cost
1235   /// pairs.
1236   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1237 
1238   /// A set containing all BasicBlocks that are known to be present after
1239   /// vectorization as predicated blocks.
1240   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1241 
1242   /// Records whether it is allowed to have the original scalar loop execute at
1243   /// least once. This may be needed as a fallback loop in case runtime
1244   /// aliasing/dependence checks fail, or to handle the tail/remainder
1245   /// iterations when the trip count is unknown or is not a multiple of the VF,
1246   /// or as a peel-loop to handle gaps in interleave-groups.
1247   /// Under optsize and when the trip count is very small we don't allow any
1248   /// iterations to execute in the scalar loop.
1249   bool IsScalarEpilogueAllowed = true;
1250 
1251   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1252   bool FoldTailByMasking = false;
1253 
1254   /// A map holding scalar costs for different vectorization factors. The
1255   /// presence of a cost for an instruction in the mapping indicates that the
1256   /// instruction will be scalarized when vectorizing with the associated
1257   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1258   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1259 
1260   /// Holds the instructions known to be uniform after vectorization.
1261   /// The data is collected per VF.
1262   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1263 
1264   /// Holds the instructions known to be scalar after vectorization.
1265   /// The data is collected per VF.
1266   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1267 
1268   /// Holds the instructions (address computations) that are forced to be
1269   /// scalarized.
1270   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1271 
1272   /// Returns the expected difference in cost from scalarizing the expression
1273   /// feeding a predicated instruction \p PredInst. The instructions to
1274   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1275   /// non-negative return value implies the expression will be scalarized.
1276   /// Currently, only single-use chains are considered for scalarization.
1277   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1278                               unsigned VF);
1279 
1280   /// Collect the instructions that are uniform after vectorization. An
1281   /// instruction is uniform if we represent it with a single scalar value in
1282   /// the vectorized loop corresponding to each vector iteration. Examples of
1283   /// uniform instructions include pointer operands of consecutive or
1284   /// interleaved memory accesses. Note that although uniformity implies an
1285   /// instruction will be scalar, the reverse is not true. In general, a
1286   /// scalarized instruction will be represented by VF scalar values in the
1287   /// vectorized loop, each corresponding to an iteration of the original
1288   /// scalar loop.
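  /// For example (illustrative), the address computation feeding a group of
  /// consecutive loads is uniform: one scalar address per unroll part
  /// suffices, whereas a non-uniform scalarized instruction is replicated VF
  /// times per part.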
1289   void collectLoopUniforms(unsigned VF);
1290 
1291   /// Collect the instructions that are scalar after vectorization. An
1292   /// instruction is scalar if it is known to be uniform or will be scalarized
1293   /// during vectorization. Non-uniform scalarized instructions will be
1294   /// represented by VF values in the vectorized loop, each corresponding to an
1295   /// iteration of the original scalar loop.
1296   void collectLoopScalars(unsigned VF);
1297 
1298   /// Keeps the cost model's vectorization decision and cost for instructions.
1299   /// Right now it is used for memory instructions only.
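  /// For example (illustrative), an entry may map the pair (some load
  /// instruction, VF = 4) to (CM_Widen, cost), recording that the load is
  /// widened when vectorizing by four and what that is expected to cost.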
1300   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1301                                 std::pair<InstWidening, unsigned>>;
1302 
1303   DecisionList WideningDecisions;
1304 
1305 public:
1306   /// The loop that we evaluate.
1307   Loop *TheLoop;
1308 
1309   /// Predicated scalar evolution analysis.
1310   PredicatedScalarEvolution &PSE;
1311 
1312   /// Loop Info analysis.
1313   LoopInfo *LI;
1314 
1315   /// Vectorization legality.
1316   LoopVectorizationLegality *Legal;
1317 
1318   /// Vector target information.
1319   const TargetTransformInfo &TTI;
1320 
1321   /// Target Library Info.
1322   const TargetLibraryInfo *TLI;
1323 
1324   /// Demanded bits analysis.
1325   DemandedBits *DB;
1326 
1327   /// Assumption cache.
1328   AssumptionCache *AC;
1329 
1330   /// Interface to emit optimization remarks.
1331   OptimizationRemarkEmitter *ORE;
1332 
1333   const Function *TheFunction;
1334 
1335   /// Loop Vectorize Hint.
1336   const LoopVectorizeHints *Hints;
1337 
1338   /// The interleave access information contains groups of interleaved accesses
1339   /// with the same stride and close to each other.
1340   InterleavedAccessInfo &InterleaveInfo;
1341 
1342   /// Values to ignore in the cost model.
1343   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1344 
1345   /// Values to ignore in the cost model when VF > 1.
1346   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1347 };
1348 
1349 } // end namespace llvm
1350 
1351 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1352 // vectorization. The loop needs to be annotated with #pragma omp simd
1353 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
1354 // vector length information is not provided, vectorization is not considered
1355 // explicit. Interleave hints are not allowed either. These limitations will be
1356 // relaxed in the future.
1357 // Please note that we are currently forced to abuse the pragma 'clang
1358 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1359 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1360 // provides *explicit vectorization hints* (LV can bypass legal checks and
1361 // assume that vectorization is legal). However, both hints are implemented
1362 // using the same metadata (llvm.loop.vectorize, processed by
1363 // LoopVectorizeHints). This will be fixed in the future when the native IR
1364 // representation for pragma 'omp simd' is introduced.
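// For illustration only, an outer loop annotated along these lines (with an
// explicit vector length) is the kind of candidate this path accepts:
//
//   #pragma omp simd simdlen(4)
//   for (i = 0; i < N; ++i)       // annotated outer loop
//     for (j = 0; j < M; ++j)     // inner loop
//       A[i][j] += B[i][j];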
1365 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1366                                    OptimizationRemarkEmitter *ORE) {
1367   assert(!OuterLp->empty() && "This is not an outer loop");
1368   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1369 
1370   // Only outer loops with an explicit vectorization hint are supported.
1371   // Unannotated outer loops are ignored.
1372   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1373     return false;
1374 
1375   Function *Fn = OuterLp->getHeader()->getParent();
1376   if (!Hints.allowVectorization(Fn, OuterLp,
1377                                 true /*VectorizeOnlyWhenForced*/)) {
1378     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1379     return false;
1380   }
1381 
1382   if (!Hints.getWidth()) {
1383     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
1384     Hints.emitRemarkWithHints();
1385     return false;
1386   }
1387 
1388   if (Hints.getInterleave() > 1) {
1389     // TODO: Interleave support is future work.
1390     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1391                          "outer loops.\n");
1392     Hints.emitRemarkWithHints();
1393     return false;
1394   }
1395 
1396   return true;
1397 }
1398 
1399 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1400                                   OptimizationRemarkEmitter *ORE,
1401                                   SmallVectorImpl<Loop *> &V) {
1402   // Collect inner loops and outer loops without irreducible control flow. For
1403   // now, only collect outer loops that have explicit vectorization hints. If we
1404   // are stress testing the VPlan H-CFG construction, we collect the outermost
1405   // loop of every loop nest.
1406   if (L.empty() || VPlanBuildStressTest ||
1407       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1408     LoopBlocksRPO RPOT(&L);
1409     RPOT.perform(LI);
1410     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1411       V.push_back(&L);
1412       // TODO: Collect inner loops inside marked outer loops in case
1413       // vectorization fails for the outer loop. Do not invoke
1414       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1415       // already known to be reducible. We can use an inherited attribute for
1416       // that.
1417       return;
1418     }
1419   }
1420   for (Loop *InnerL : L)
1421     collectSupportedLoops(*InnerL, LI, ORE, V);
1422 }
1423 
1424 namespace {
1425 
1426 /// The LoopVectorize Pass.
1427 struct LoopVectorize : public FunctionPass {
1428   /// Pass identification, replacement for typeid
1429   static char ID;
1430 
1431   LoopVectorizePass Impl;
1432 
1433   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1434                          bool VectorizeOnlyWhenForced = false)
1435       : FunctionPass(ID) {
1436     Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced;
1437     Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced;
1438     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1439   }
1440 
1441   bool runOnFunction(Function &F) override {
1442     if (skipFunction(F))
1443       return false;
1444 
1445     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1446     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1447     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1448     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1449     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1450     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1451     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
1452     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1453     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1454     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1455     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1456     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1457 
1458     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1459         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1460 
1461     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1462                         GetLAA, *ORE);
1463   }
1464 
1465   void getAnalysisUsage(AnalysisUsage &AU) const override {
1466     AU.addRequired<AssumptionCacheTracker>();
1467     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1468     AU.addRequired<DominatorTreeWrapperPass>();
1469     AU.addRequired<LoopInfoWrapperPass>();
1470     AU.addRequired<ScalarEvolutionWrapperPass>();
1471     AU.addRequired<TargetTransformInfoWrapperPass>();
1472     AU.addRequired<AAResultsWrapperPass>();
1473     AU.addRequired<LoopAccessLegacyAnalysis>();
1474     AU.addRequired<DemandedBitsWrapperPass>();
1475     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1476 
1477     // We currently do not preserve loopinfo/dominator analyses with outer loop
1478     // vectorization. Until this is addressed, mark these analyses as preserved
1479     // only for the non-VPlan-native path.
1480     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1481     if (!EnableVPlanNativePath) {
1482       AU.addPreserved<LoopInfoWrapperPass>();
1483       AU.addPreserved<DominatorTreeWrapperPass>();
1484     }
1485 
1486     AU.addPreserved<BasicAAWrapperPass>();
1487     AU.addPreserved<GlobalsAAWrapperPass>();
1488   }
1489 };
1490 
1491 } // end anonymous namespace
1492 
1493 //===----------------------------------------------------------------------===//
1494 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1495 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1496 //===----------------------------------------------------------------------===//
1497 
1498 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1499   // We need to place the broadcast of invariant variables outside the loop,
1500   // but only if it's proven safe to do so. Otherwise, the broadcast will be
1501   // inside the vector loop body.
1502   Instruction *Instr = dyn_cast<Instruction>(V);
1503   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1504                      (!Instr ||
1505                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1506   // Place the code for broadcasting invariant variables in the new preheader.
1507   IRBuilder<>::InsertPointGuard Guard(Builder);
1508   if (SafeToHoist)
1509     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1510 
1511   // Broadcast the scalar into all locations in the vector.
1512   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1513 
1514   return Shuf;
1515 }
1516 
1517 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1518     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1519   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1520          "Expected either an induction phi-node or a truncate of it!");
1521   Value *Start = II.getStartValue();
1522 
1523   // Construct the initial value of the vector IV in the vector loop preheader
1524   auto CurrIP = Builder.saveIP();
1525   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1526   if (isa<TruncInst>(EntryVal)) {
1527     assert(Start->getType()->isIntegerTy() &&
1528            "Truncation requires an integer type");
1529     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1530     Step = Builder.CreateTrunc(Step, TruncType);
1531     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1532   }
1533   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1534   Value *SteppedStart =
1535       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1536 
1537   // We create vector phi nodes for both integer and floating-point induction
1538   // variables. Here, we determine the kind of arithmetic we will perform.
1539   Instruction::BinaryOps AddOp;
1540   Instruction::BinaryOps MulOp;
1541   if (Step->getType()->isIntegerTy()) {
1542     AddOp = Instruction::Add;
1543     MulOp = Instruction::Mul;
1544   } else {
1545     AddOp = II.getInductionOpcode();
1546     MulOp = Instruction::FMul;
1547   }
1548 
1549   // Multiply the vectorization factor by the step using integer or
1550   // floating-point arithmetic as appropriate.
1551   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1552   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1553 
1554   // Create a vector splat to use in the induction update.
1555   //
1556   // FIXME: If the step is non-constant, we create the vector splat with
1557   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1558   //        handle a constant vector splat.
1559   Value *SplatVF = isa<Constant>(Mul)
1560                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
1561                        : Builder.CreateVectorSplat(VF, Mul);
1562   Builder.restoreIP(CurrIP);
1563 
1564   // We may need to add the step a number of times, depending on the unroll
1565   // factor. The last of those goes into the PHI.
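  // For illustration (assuming VF = 4, UF = 2 and an integer step S): on the
  // first vector iteration part 0 sees <Start, Start+S, Start+2S, Start+3S>,
  // part 1 sees that value plus the splat <4S, 4S, 4S, 4S>, and the value fed
  // back into the phi is the part-1 value plus the splat once more.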
1566   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1567                                     &*LoopVectorBody->getFirstInsertionPt());
1568   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1569   Instruction *LastInduction = VecInd;
1570   for (unsigned Part = 0; Part < UF; ++Part) {
1571     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1572 
1573     if (isa<TruncInst>(EntryVal))
1574       addMetadata(LastInduction, EntryVal);
1575     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1576 
1577     LastInduction = cast<Instruction>(addFastMathFlag(
1578         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1579     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1580   }
1581 
1582   // Move the last step to the end of the latch block. This ensures consistent
1583   // placement of all induction updates.
1584   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1585   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1586   auto *ICmp = cast<Instruction>(Br->getCondition());
1587   LastInduction->moveBefore(ICmp);
1588   LastInduction->setName("vec.ind.next");
1589 
1590   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1591   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1592 }
1593 
1594 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1595   return Cost->isScalarAfterVectorization(I, VF) ||
1596          Cost->isProfitableToScalarize(I, VF);
1597 }
1598 
1599 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1600   if (shouldScalarizeInstruction(IV))
1601     return true;
1602   auto isScalarInst = [&](User *U) -> bool {
1603     auto *I = cast<Instruction>(U);
1604     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1605   };
1606   return llvm::any_of(IV->users(), isScalarInst);
1607 }
1608 
1609 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1610     const InductionDescriptor &ID, const Instruction *EntryVal,
1611     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1612   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1613          "Expected either an induction phi-node or a truncate of it!");
1614 
1615   // This induction variable is not the phi from the original loop but the
1616   // newly-created IV based on the proof that the casted Phi is equal to the
1617   // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
1618   // re-uses the same InductionDescriptor that the original IV uses, but we
1619   // don't have to do any recording in this case - that is done when the
1620   // original IV is processed.
1621   if (isa<TruncInst>(EntryVal))
1622     return;
1623 
1624   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1625   if (Casts.empty())
1626     return;
1627   // Only the first Cast instruction in the Casts vector is of interest.
1628   // The rest of the Casts (if any exist) have no uses outside the
1629   // induction update chain itself.
1630   Instruction *CastInst = *Casts.begin();
1631   if (Lane < UINT_MAX)
1632     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1633   else
1634     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1635 }
1636 
1637 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1638   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1639          "Primary induction variable must have an integer type");
1640 
1641   auto II = Legal->getInductionVars()->find(IV);
1642   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
1643 
1644   auto ID = II->second;
1645   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1646 
1647   // The scalar value to broadcast. This will be derived from the canonical
1648   // induction variable.
1649   Value *ScalarIV = nullptr;
1650 
1651   // The value from the original loop to which we are mapping the new induction
1652   // variable.
1653   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1654 
1655   // True if we have vectorized the induction variable.
1656   auto VectorizedIV = false;
1657 
1658   // Determine if we want a scalar version of the induction variable. This is
1659   // true if the induction variable itself is not widened, or if it has at
1660   // least one user in the loop that is not widened.
1661   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
1662 
1663   // Generate code for the induction step. Note that induction steps are
1664   // required to be loop-invariant.
1665   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
1666          "Induction step should be loop invariant");
1667   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1668   Value *Step = nullptr;
1669   if (PSE.getSE()->isSCEVable(IV->getType())) {
1670     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1671     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
1672                              LoopVectorPreHeader->getTerminator());
1673   } else {
1674     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
1675   }
1676 
1677   // Try to create a new independent vector induction variable. If we can't
1678   // create the phi node, we will splat the scalar induction variable in each
1679   // loop iteration.
1680   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
1681     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1682     VectorizedIV = true;
1683   }
1684 
1685   // If we haven't yet vectorized the induction variable, or if we will create
1686   // a scalar one, we need to define the scalar induction variable and step
1687   // values. If we were given a truncation type, truncate the canonical
1688   // induction variable and step. Otherwise, derive these values from the
1689   // induction descriptor.
1690   if (!VectorizedIV || NeedsScalarIV) {
1691     ScalarIV = Induction;
1692     if (IV != OldInduction) {
1693       ScalarIV = IV->getType()->isIntegerTy()
1694                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1695                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1696                                           IV->getType());
1697       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1698       ScalarIV->setName("offset.idx");
1699     }
1700     if (Trunc) {
1701       auto *TruncType = cast<IntegerType>(Trunc->getType());
1702       assert(Step->getType()->isIntegerTy() &&
1703              "Truncation requires an integer step");
1704       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1705       Step = Builder.CreateTrunc(Step, TruncType);
1706     }
1707   }
1708 
1709   // If we haven't yet vectorized the induction variable, splat the scalar
1710   // induction variable, and build the necessary step vectors.
1711   // TODO: Don't do it unless the vectorized IV is really required.
1712   if (!VectorizedIV) {
1713     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1714     for (unsigned Part = 0; Part < UF; ++Part) {
1715       Value *EntryPart =
1716           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1717       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1718       if (Trunc)
1719         addMetadata(EntryPart, Trunc);
1720       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1721     }
1722   }
1723 
1724   // If an induction variable is only used for counting loop iterations or
1725   // calculating addresses, it doesn't need to be widened. Create scalar steps
1726   // that can be used by instructions we will later scalarize. Note that the
1727   // addition of the scalar steps will not increase the number of instructions
1728   // in the loop in the common case prior to InstCombine. We will be trading
1729   // one vector extract for each scalar step.
1730   if (NeedsScalarIV)
1731     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1732 }
1733 
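// Illustrative note for getStepVector below: with Val = <v, v, v, v>
// (VLen = 4), StartIdx = 8 and an integer step s, the result is
//   <v + 8*s, v + 9*s, v + 10*s, v + 11*s>,
// i.e. the splatted step multiplied by consecutive indices and added to Val.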
1734 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1735                                           Instruction::BinaryOps BinOp) {
1736   // Create and check the types.
1737   assert(Val->getType()->isVectorTy() && "Must be a vector");
1738   int VLen = Val->getType()->getVectorNumElements();
1739 
1740   Type *STy = Val->getType()->getScalarType();
1741   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1742          "Induction Step must be an integer or FP");
1743   assert(Step->getType() == STy && "Step has wrong type");
1744 
1745   SmallVector<Constant *, 8> Indices;
1746 
1747   if (STy->isIntegerTy()) {
1748     // Create a vector of consecutive indices starting at StartIdx.
1749     for (int i = 0; i < VLen; ++i)
1750       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1751 
1752     // Add the consecutive indices to the vector value.
1753     Constant *Cv = ConstantVector::get(Indices);
1754     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1755     Step = Builder.CreateVectorSplat(VLen, Step);
1756     assert(Step->getType() == Val->getType() && "Invalid step vec");
1757     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1758     // which can be found from the original scalar operations.
1759     Step = Builder.CreateMul(Cv, Step);
1760     return Builder.CreateAdd(Val, Step, "induction");
1761   }
1762 
1763   // Floating point induction.
1764   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1765          "Binary Opcode should be specified for FP induction");
1766   // Create a vector of consecutive indices starting at StartIdx.
1767   for (int i = 0; i < VLen; ++i)
1768     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1769 
1770   // Add the consecutive indices to the vector value.
1771   Constant *Cv = ConstantVector::get(Indices);
1772 
1773   Step = Builder.CreateVectorSplat(VLen, Step);
1774 
1775   // Floating point operations had to be 'fast' to enable the induction.
1776   FastMathFlags Flags;
1777   Flags.setFast();
1778 
1779   Value *MulOp = Builder.CreateFMul(Cv, Step);
1780   if (isa<Instruction>(MulOp))
1781     // Have to check, MulOp may be a constant
1782     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1783 
1784   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1785   if (isa<Instruction>(BOp))
1786     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1787   return BOp;
1788 }
1789 
1790 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1791                                            Instruction *EntryVal,
1792                                            const InductionDescriptor &ID) {
1793   // We shouldn't have to build scalar steps if we aren't vectorizing.
1794   assert(VF > 1 && "VF should be greater than one");
1795 
1796   // Get the value type and ensure it and the step have the same integer type.
1797   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1798   assert(ScalarIVTy == Step->getType() &&
1799          "Val and Step should have the same type");
1800 
1801   // We build scalar steps for both integer and floating-point induction
1802   // variables. Here, we determine the kind of arithmetic we will perform.
1803   Instruction::BinaryOps AddOp;
1804   Instruction::BinaryOps MulOp;
1805   if (ScalarIVTy->isIntegerTy()) {
1806     AddOp = Instruction::Add;
1807     MulOp = Instruction::Mul;
1808   } else {
1809     AddOp = ID.getInductionOpcode();
1810     MulOp = Instruction::FMul;
1811   }
1812 
1813   // Determine the number of scalars we need to generate for each unroll
1814   // iteration. If EntryVal is uniform, we only need to generate the first
1815   // lane. Otherwise, we generate all VF values.
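  // For example (illustrative): with VF = 4 and UF = 2, a non-uniform
  // EntryVal gets eight scalar steps, lane L of part P computing
  //   ScalarIV + (4 * P + L) * Step,
  // while a uniform EntryVal only needs lane 0 of each part.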
1816   unsigned Lanes =
1817       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
1818                                                                          : VF;
1819   // Compute the scalar steps and save the results in VectorLoopValueMap.
1820   for (unsigned Part = 0; Part < UF; ++Part) {
1821     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
1822       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
1823       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
1824       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
1825       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
1826       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
1827     }
1828   }
1829 }
1830 
1831 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
1832   assert(V != Induction && "The new induction variable should not be used.");
1833   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
1834   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
1835 
1836   // If we have a stride that is replaced by one, do it here. Defer this for
1837   // the VPlan-native path until we start running Legal checks in that path.
1838   if (!EnableVPlanNativePath && Legal->hasStride(V))
1839     V = ConstantInt::get(V->getType(), 1);
1840 
1841   // If we have a vector mapped to this value, return it.
1842   if (VectorLoopValueMap.hasVectorValue(V, Part))
1843     return VectorLoopValueMap.getVectorValue(V, Part);
1844 
1845   // If the value has not been vectorized, check if it has been scalarized
1846   // instead. If it has been scalarized, and we actually need the value in
1847   // vector form, we will construct the vector values on demand.
1848   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
1849     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
1850 
1851     // If we've scalarized a value, that value should be an instruction.
1852     auto *I = cast<Instruction>(V);
1853 
1854     // If we aren't vectorizing, we can just copy the scalar map values over to
1855     // the vector map.
1856     if (VF == 1) {
1857       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
1858       return ScalarValue;
1859     }
1860 
1861     // Get the last scalar instruction we generated for V and Part. If the value
1862     // is known to be uniform after vectorization, this corresponds to lane zero
1863     // of the Part unroll iteration. Otherwise, the last instruction is the one
1864     // we created for the last vector lane of the Part unroll iteration.
1865     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
1866     auto *LastInst = cast<Instruction>(
1867         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
1868 
1869     // Set the insert point after the last scalarized instruction. This ensures
1870     // the insertelement sequence will directly follow the scalar definitions.
1871     auto OldIP = Builder.saveIP();
1872     auto NewIP = std::next(BasicBlock::iterator(LastInst));
1873     Builder.SetInsertPoint(&*NewIP);
1874 
1875     // However, if we are vectorizing, we need to construct the vector values.
1876     // If the value is known to be uniform after vectorization, we can just
1877     // broadcast the scalar value corresponding to lane zero for each unroll
1878     // iteration. Otherwise, we construct the vector values using insertelement
1879     // instructions. Since the resulting vectors are stored in
1880     // VectorLoopValueMap, we will only generate the insertelements once.
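    // For illustration (assuming VF = 4): a non-uniform scalarized value with
    // lanes s0..s3 is packed by a chain of insertelements starting from
    // undef (lane 0 into undef, lane 1 into that result, and so on), whereas
    // a uniform value is simply broadcast from its lane-zero scalar.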
1881     Value *VectorValue = nullptr;
1882     if (Cost->isUniformAfterVectorization(I, VF)) {
1883       VectorValue = getBroadcastInstrs(ScalarValue);
1884       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
1885     } else {
1886       // Initialize packing with insertelements to start from undef.
1887       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
1888       VectorLoopValueMap.setVectorValue(V, Part, Undef);
1889       for (unsigned Lane = 0; Lane < VF; ++Lane)
1890         packScalarIntoVectorValue(V, {Part, Lane});
1891       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
1892     }
1893     Builder.restoreIP(OldIP);
1894     return VectorValue;
1895   }
1896 
1897   // If this scalar is unknown, assume that it is a constant or that it is
1898   // loop invariant. Broadcast V and save the value for future uses.
1899   Value *B = getBroadcastInstrs(V);
1900   VectorLoopValueMap.setVectorValue(V, Part, B);
1901   return B;
1902 }
1903 
1904 Value *
1905 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
1906                                             const VPIteration &Instance) {
1907   // If the value is not an instruction contained in the loop, it should
1908   // already be scalar.
1909   if (OrigLoop->isLoopInvariant(V))
1910     return V;
1911 
1912   assert((Instance.Lane == 0 ||
1913           !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
1914          "Uniform values only have lane zero");
1915 
1916   // If the value from the original loop has not been vectorized, it is
1917   // represented by UF x VF scalar values in the new loop. Return the requested
1918   // scalar value.
1919   if (VectorLoopValueMap.hasScalarValue(V, Instance))
1920     return VectorLoopValueMap.getScalarValue(V, Instance);
1921 
1922   // If the value has not been scalarized, get its entry in VectorLoopValueMap
1923   // for the given unroll part. If this entry is not a vector type (i.e., the
1924   // vectorization factor is one), there is no need to generate an
1925   // extractelement instruction.
1926   auto *U = getOrCreateVectorValue(V, Instance.Part);
1927   if (!U->getType()->isVectorTy()) {
1928     assert(VF == 1 && "Value not scalarized has non-vector type");
1929     return U;
1930   }
1931 
1932   // Otherwise, the value from the original loop has been vectorized and is
1933   // represented by UF vector values. Extract and return the requested scalar
1934   // value from the appropriate vector lane.
1935   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
1936 }
1937 
1938 void InnerLoopVectorizer::packScalarIntoVectorValue(
1939     Value *V, const VPIteration &Instance) {
1940   assert(V != Induction && "The new induction variable should not be used.");
1941   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
1942   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
1943 
1944   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
1945   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
1946   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
1947                                             Builder.getInt32(Instance.Lane));
1948   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
1949 }
1950 
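// Illustrative example (VF = 4): reverseVector turns <a, b, c, d> into
// <d, c, b, a> using a shufflevector with mask <3, 2, 1, 0>.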
1951 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
1952   assert(Vec->getType()->isVectorTy() && "Invalid type");
1953   SmallVector<Constant *, 8> ShuffleMask;
1954   for (unsigned i = 0; i < VF; ++i)
1955     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
1956 
1957   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
1958                                      ConstantVector::get(ShuffleMask),
1959                                      "reverse");
1960 }
1961 
1962 // Return whether we allow using masked interleave-groups (for dealing with
1963 // strided loads/stores that reside in predicated blocks, or for dealing
1964 // with gaps).
1965 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
1966   // If an override option has been passed in for interleaved accesses, use it.
1967   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
1968     return EnableMaskedInterleavedMemAccesses;
1969 
1970   return TTI.enableMaskedInterleavedAccessVectorization();
1971 }
1972 
1973 // Try to vectorize the interleave group that \p Instr belongs to.
1974 //
1975 // E.g. Translate following interleaved load group (factor = 3):
1976 //   for (i = 0; i < N; i+=3) {
1977 //     R = Pic[i];             // Member of index 0
1978 //     G = Pic[i+1];           // Member of index 1
1979 //     B = Pic[i+2];           // Member of index 2
1980 //     ... // do something to R, G, B
1981 //   }
1982 // To:
1983 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
1984 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
1985 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
1986 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
1987 //
1988 // Or translate following interleaved store group (factor = 3):
1989 //   for (i = 0; i < N; i+=3) {
1990 //     ... do something to R, G, B
1991 //     Pic[i]   = R;           // Member of index 0
1992 //     Pic[i+1] = G;           // Member of index 1
1993 //     Pic[i+2] = B;           // Member of index 2
1994 //   }
1995 // To:
1996 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
1997 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
1998 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
1999 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2000 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2001 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
2002                                                    VectorParts *BlockInMask) {
2003   const InterleaveGroup<Instruction> *Group =
2004       Cost->getInterleavedAccessGroup(Instr);
2005   assert(Group && "Failed to get an interleaved access group.");
2006 
2007   // Skip if current instruction is not the insert position.
2008   if (Instr != Group->getInsertPos())
2009     return;
2010 
2011   const DataLayout &DL = Instr->getModule()->getDataLayout();
2012   Value *Ptr = getLoadStorePointerOperand(Instr);
2013 
2014   // Prepare for the vector type of the interleaved load/store.
2015   Type *ScalarTy = getMemInstValueType(Instr);
2016   unsigned InterleaveFactor = Group->getFactor();
2017   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2018   Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr));
2019 
2020   // Prepare for the new pointers.
2021   setDebugLocFromInst(Builder, Ptr);
2022   SmallVector<Value *, 2> NewPtrs;
2023   unsigned Index = Group->getIndex(Instr);
2024 
2025   VectorParts Mask;
2026   bool IsMaskForCondRequired = BlockInMask;
2027   if (IsMaskForCondRequired) {
2028     Mask = *BlockInMask;
2029     // TODO: extend the masked interleaved-group support to reversed access.
2030     assert(!Group->isReverse() && "Reversed masked interleave-group "
2031                                   "not supported.");
2032   }
2033 
2034   // If the group is reverse, adjust the index to refer to the last vector lane
2035   // instead of the first. We adjust the index from the first vector lane,
2036   // rather than directly getting the pointer for lane VF - 1, because the
2037   // pointer operand of the interleaved access is supposed to be uniform. For
2038   // uniform instructions, we're only required to generate a value for the
2039   // first vector lane in each unroll iteration.
2040   if (Group->isReverse())
2041     Index += (VF - 1) * Group->getFactor();
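  // For illustration: with VF = 4 and an interleave factor of 3, a reverse
  // group advances Index by (4 - 1) * 3 = 9 elements, so the negative-offset
  // GEP below ends up addressing the tuple used by the last vector lane.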
2042 
2043   bool InBounds = false;
2044   if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2045     InBounds = gep->isInBounds();
2046 
2047   for (unsigned Part = 0; Part < UF; Part++) {
2048     Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2049 
2050     // Note that the current instruction could be at any member index, so the
2051     // address needs to be adjusted to the member of index 0.
2052     //
2053     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2054     //       b = A[i];       // Member of index 0
2055     // The current pointer points to A[i+1]; adjust it to A[i].
2056     //
2057     // E.g.  A[i+1] = a;     // Member of index 1
2058     //       A[i]   = b;     // Member of index 0
2059     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2060     // The current pointer points to A[i+2]; adjust it to A[i].
2061     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2062     if (InBounds)
2063       cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true);
2064 
2065     // Cast to the vector pointer type.
2066     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2067   }
2068 
2069   setDebugLocFromInst(Builder, Instr);
2070   Value *UndefVec = UndefValue::get(VecTy);
2071 
2072   Value *MaskForGaps = nullptr;
2073   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2074     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2075     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2076   }
2077 
2078   // Vectorize the interleaved load group.
2079   if (isa<LoadInst>(Instr)) {
2080     // For each unroll part, create a wide load for the group.
2081     SmallVector<Value *, 2> NewLoads;
2082     for (unsigned Part = 0; Part < UF; Part++) {
2083       Instruction *NewLoad;
2084       if (IsMaskForCondRequired || MaskForGaps) {
2085         assert(useMaskedInterleavedAccesses(*TTI) &&
2086                "masked interleaved groups are not allowed.");
2087         Value *GroupMask = MaskForGaps;
2088         if (IsMaskForCondRequired) {
2089           auto *Undefs = UndefValue::get(Mask[Part]->getType());
2090           auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2091           Value *ShuffledMask = Builder.CreateShuffleVector(
2092               Mask[Part], Undefs, RepMask, "interleaved.mask");
2093           GroupMask = MaskForGaps
2094                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2095                                                 MaskForGaps)
2096                           : ShuffledMask;
2097         }
2098         NewLoad =
2099             Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(),
2100                                      GroupMask, UndefVec, "wide.masked.vec");
2101       } else
2102         NewLoad = Builder.CreateAlignedLoad(NewPtrs[Part],
2103                                             Group->getAlignment(), "wide.vec");
2105       Group->addMetadata(NewLoad);
2106       NewLoads.push_back(NewLoad);
2107     }
2108 
2109     // For each member in the group, shuffle out the appropriate data from the
2110     // wide loads.
2111     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2112       Instruction *Member = Group->getMember(I);
2113 
2114       // Skip the gaps in the group.
2115       if (!Member)
2116         continue;
2117 
2118       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2119       for (unsigned Part = 0; Part < UF; Part++) {
2120         Value *StridedVec = Builder.CreateShuffleVector(
2121             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2122 
2123         // If this member has a different type, cast the result to that type.
2124         if (Member->getType() != ScalarTy) {
2125           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2126           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2127         }
2128 
2129         if (Group->isReverse())
2130           StridedVec = reverseVector(StridedVec);
2131 
2132         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2133       }
2134     }
2135     return;
2136   }
2137 
2138   // The subvector type for the current instruction.
2139   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2140 
2141   // Vectorize the interleaved store group.
2142   for (unsigned Part = 0; Part < UF; Part++) {
2143     // Collect the stored vector from each member.
2144     SmallVector<Value *, 4> StoredVecs;
2145     for (unsigned i = 0; i < InterleaveFactor; i++) {
2146       // An interleaved store group doesn't allow gaps, so each index has a member.
2147       Instruction *Member = Group->getMember(i);
2148       assert(Member && "Failed to get a member from an interleaved store group");
2149 
2150       Value *StoredVec = getOrCreateVectorValue(
2151           cast<StoreInst>(Member)->getValueOperand(), Part);
2152       if (Group->isReverse())
2153         StoredVec = reverseVector(StoredVec);
2154 
2155       // If this member has a different type, cast it to a unified type.
2157       if (StoredVec->getType() != SubVT)
2158         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2159 
2160       StoredVecs.push_back(StoredVec);
2161     }
2162 
2163     // Concatenate all vectors into a wide vector.
2164     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2165 
2166     // Interleave the elements in the wide vector.
2167     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2168     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2169                                               "interleaved.vec");
2170 
2171     Instruction *NewStoreInstr;
2172     if (IsMaskForCondRequired) {
2173       auto *Undefs = UndefValue::get(Mask[Part]->getType());
2174       auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2175       Value *ShuffledMask = Builder.CreateShuffleVector(
2176           Mask[Part], Undefs, RepMask, "interleaved.mask");
2177       NewStoreInstr = Builder.CreateMaskedStore(
2178           IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask);
2179     } else
2180       NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part],
2181                                                  Group->getAlignment());
2183 
2184     Group->addMetadata(NewStoreInstr);
2185   }
2186 }
2187 
2188 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2189                                                      VectorParts *BlockInMask) {
2190   // Attempt to issue a wide load or store.
2191   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2192   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2193 
2194   assert((LI || SI) && "Invalid Load/Store instruction");
2195 
2196   LoopVectorizationCostModel::InstWidening Decision =
2197       Cost->getWideningDecision(Instr, VF);
2198   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2199          "CM decision should be taken at this point");
2200   if (Decision == LoopVectorizationCostModel::CM_Interleave)
2201     return vectorizeInterleaveGroup(Instr);
2202 
2203   Type *ScalarDataTy = getMemInstValueType(Instr);
2204   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2205   Value *Ptr = getLoadStorePointerOperand(Instr);
2206   unsigned Alignment = getLoadStoreAlignment(Instr);
2207   // An alignment of 0 means target ABI alignment. We need to use the scalar's
2208   // target ABI alignment in such a case.
2209   const DataLayout &DL = Instr->getModule()->getDataLayout();
2210   if (!Alignment)
2211     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2212   unsigned AddressSpace = getLoadStoreAddressSpace(Instr);
2213 
2214   // Determine if the pointer operand of the access is either consecutive or
2215   // reverse consecutive.
2216   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2217   bool ConsecutiveStride =
2218       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2219   bool CreateGatherScatter =
2220       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2221 
2222   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2223   // gather/scatter. Otherwise Decision should have been to Scalarize.
2224   assert((ConsecutiveStride || CreateGatherScatter) &&
2225          "The instruction should be scalarized");
2226 
2227   // Handle consecutive loads/stores.
2228   if (ConsecutiveStride)
2229     Ptr = getOrCreateScalarValue(Ptr, {0, 0});
2230 
2231   VectorParts Mask;
2232   bool isMaskRequired = BlockInMask;
2233   if (isMaskRequired)
2234     Mask = *BlockInMask;
2235 
2236   bool InBounds = false;
2237   if (auto *gep = dyn_cast<GetElementPtrInst>(
2238           getLoadStorePointerOperand(Instr)->stripPointerCasts()))
2239     InBounds = gep->isInBounds();
2240 
2241   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2242     // Calculate the pointer for the specific unroll-part.
2243     GetElementPtrInst *PartPtr = nullptr;
2244 
2245     if (Reverse) {
2246       // If the address is consecutive but reversed, then the
2247       // wide store needs to start at the last vector element.
2248       PartPtr = cast<GetElementPtrInst>(
2249           Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF)));
2250       PartPtr->setIsInBounds(InBounds);
2251       PartPtr = cast<GetElementPtrInst>(
2252           Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF)));
2253       PartPtr->setIsInBounds(InBounds);
2254       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2255         Mask[Part] = reverseVector(Mask[Part]);
2256     } else {
2257       PartPtr = cast<GetElementPtrInst>(
2258           Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF)));
2259       PartPtr->setIsInBounds(InBounds);
2260     }
2261 
2262     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2263   };
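  // For illustration: with VF = 4 and Part = 1, a reverse access emits
  // "gep Ptr, -4" followed by "gep ..., -3", positioning the wide access at
  // the lowest-addressed of the four elements touched by that unroll part,
  // whereas a forward access simply emits "gep Ptr, 4".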
2264 
2265   // Handle Stores:
2266   if (SI) {
2267     setDebugLocFromInst(Builder, SI);
2268 
2269     for (unsigned Part = 0; Part < UF; ++Part) {
2270       Instruction *NewSI = nullptr;
2271       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
2272       if (CreateGatherScatter) {
2273         Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2274         Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2275         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2276                                             MaskPart);
2277       } else {
2278         if (Reverse) {
2279           // If we store to reverse consecutive memory locations, then we need
2280           // to reverse the order of elements in the stored value.
2281           StoredVal = reverseVector(StoredVal);
2282           // We don't want to update the value in the map as it might be used in
2283           // another expression. So don't call resetVectorValue(StoredVal).
2284         }
2285         auto *VecPtr = CreateVecPtr(Part, Ptr);
2286         if (isMaskRequired)
2287           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2288                                             Mask[Part]);
2289         else
2290           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2291       }
2292       addMetadata(NewSI, SI);
2293     }
2294     return;
2295   }
2296 
2297   // Handle loads.
2298   assert(LI && "Must have a load instruction");
2299   setDebugLocFromInst(Builder, LI);
2300   for (unsigned Part = 0; Part < UF; ++Part) {
2301     Value *NewLI;
2302     if (CreateGatherScatter) {
2303       Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
2304       Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
2305       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2306                                          nullptr, "wide.masked.gather");
2307       addMetadata(NewLI, LI);
2308     } else {
2309       auto *VecPtr = CreateVecPtr(Part, Ptr);
2310       if (isMaskRequired)
2311         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
2312                                          UndefValue::get(DataTy),
2313                                          "wide.masked.load");
2314       else
2315         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
2316 
2317       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2318       addMetadata(NewLI, LI);
2319       if (Reverse)
2320         NewLI = reverseVector(NewLI);
2321     }
2322     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2323   }
2324 }
2325 
2326 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2327                                                const VPIteration &Instance,
2328                                                bool IfPredicateInstr) {
2329   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2330 
2331   setDebugLocFromInst(Builder, Instr);
2332 
2333   // Does this instruction return a value?
2334   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2335 
2336   Instruction *Cloned = Instr->clone();
2337   if (!IsVoidRetTy)
2338     Cloned->setName(Instr->getName() + ".cloned");
2339 
2340   // Replace the operands of the cloned instructions with their scalar
2341   // equivalents in the new loop.
2342   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2343     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2344     Cloned->setOperand(op, NewOp);
2345   }
2346   addNewMetadata(Cloned, Instr);
2347 
2348   // Place the cloned scalar in the new loop.
2349   Builder.Insert(Cloned);
2350 
2351   // Add the cloned scalar to the scalar map entry.
2352   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2353 
2354   // If we just cloned a new assumption, add it to the assumption cache.
2355   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2356     if (II->getIntrinsicID() == Intrinsic::assume)
2357       AC->registerAssumption(II);
2358 
2359   // End if-block.
2360   if (IfPredicateInstr)
2361     PredicatedInstructions.push_back(Cloned);
2362 }
2363 
2364 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2365                                                       Value *End, Value *Step,
2366                                                       Instruction *DL) {
2367   BasicBlock *Header = L->getHeader();
2368   BasicBlock *Latch = L->getLoopLatch();
2369   // As we're just creating this loop, it's possible no latch exists
2370   // yet. If so, use the header as this will be a single block loop.
2371   if (!Latch)
2372     Latch = Header;
2373 
2374   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2375   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2376   setDebugLocFromInst(Builder, OldInst);
2377   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2378 
2379   Builder.SetInsertPoint(Latch->getTerminator());
2380   setDebugLocFromInst(Builder, OldInst);
2381 
2382   // Create i+1 and fill the PHINode.
2383   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2384   Induction->addIncoming(Start, L->getLoopPreheader());
2385   Induction->addIncoming(Next, Latch);
2386   // Create the compare.
2387   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2388   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2389 
2390   // Now we have two terminators. Remove the old one from the block.
2391   Latch->getTerminator()->eraseFromParent();
2392 
2393   return Induction;
2394 }
2395 
2396 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2397   if (TripCount)
2398     return TripCount;
2399 
2400   assert(L && "Create Trip Count for null loop.");
2401   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2402   // Find the loop boundaries.
2403   ScalarEvolution *SE = PSE.getSE();
2404   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2405   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2406          "Invalid loop count");
2407 
2408   Type *IdxTy = Legal->getWidestInductionType();
2409   assert(IdxTy && "No type for induction");
2410 
2411   // The exit count might have the type of i64 while the phi is i32. This can
2412   // happen if we have an induction variable that is sign extended before the
2413   // compare. The only way that we can get a backedge taken count here is if
2414   // the induction variable was signed, in which case it will not overflow and
2415   // truncation is legal.
2416   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2417       IdxTy->getPrimitiveSizeInBits())
2418     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2419   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
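  // E.g. (illustrative): an i64 backedge-taken count is truncated to an i32
  // IdxTy above, while an i16 count would instead be zero-extended to i32.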
2420 
2421   // Get the total trip count from the count by adding 1.
2422   const SCEV *ExitCount = SE->getAddExpr(
2423       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2424 
2425   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2426 
2427   // Expand the trip count and place the new instructions in the preheader.
2428   // Notice that the pre-header does not change, only the loop body.
2429   SCEVExpander Exp(*SE, DL, "induction");
2430 
2431   // Count holds the overall loop count (N).
2432   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2433                                 L->getLoopPreheader()->getTerminator());
2434 
2435   if (TripCount->getType()->isPointerTy())
2436     TripCount =
2437         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2438                                     L->getLoopPreheader()->getTerminator());
2439 
2440   return TripCount;
2441 }
2442 
2443 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2444   if (VectorTripCount)
2445     return VectorTripCount;
2446 
2447   Value *TC = getOrCreateTripCount(L);
2448   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2449 
2450   Type *Ty = TC->getType();
2451   Constant *Step = ConstantInt::get(Ty, VF * UF);
2452 
2453   // If the tail is to be folded by masking, round the number of iterations N
2454   // up to a multiple of Step instead of rounding down. This is done by first
2455   // adding Step-1 and then rounding down. Note that it's ok if this addition
2456   // overflows: the vector induction variable will eventually wrap to zero given
2457   // that it starts at zero and its Step is a power of two; the loop will then
2458   // exit, with the last early-exit vector comparison also producing all-true.
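  // As an illustration (hypothetical values): with VF * UF == 8 and N == 13,
  // the rounded count is 13 + 7 == 20, and the N - (N % Step) computation
  // below yields a vector trip count of 16, covering all 13 iterations under
  // the mask.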
2459   if (Cost->foldTailByMasking()) {
2460     assert(isPowerOf2_32(VF * UF) &&
2461            "VF*UF must be a power of 2 when folding tail by masking");
2462     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2463   }
2464 
2465   // Now we need to generate the expression for the part of the loop that the
2466   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2467   // iterations are not required for correctness, or N - Step, otherwise. Step
2468   // is equal to the vectorization factor (number of SIMD elements) times the
2469   // unroll factor (number of SIMD instructions).
2470   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2471 
2472   // If there is a non-reversed interleaved group that may speculatively access
2473   // memory out-of-bounds, we need to ensure that there will be at least one
2474   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2475   // the trip count, we set the remainder to be equal to the step. If the step
2476   // does not evenly divide the trip count, no adjustment is necessary since
2477   // there will already be scalar iterations. Note that the minimum iterations
2478   // check ensures that N >= Step.
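  // For example (hypothetical values): with N == 16 and Step == 8 the
  // remainder would be 0, so it is bumped to 8 and the vector trip count
  // becomes 8, leaving the last 8 iterations for the scalar epilogue.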
2479   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2480     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2481     R = Builder.CreateSelect(IsZero, Step, R);
2482   }
2483 
2484   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2485 
2486   return VectorTripCount;
2487 }
2488 
2489 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2490                                                    const DataLayout &DL) {
2491   // Verify that V is a vector type with same number of elements as DstVTy.
2492   unsigned VF = DstVTy->getNumElements();
2493   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2494   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2495   Type *SrcElemTy = SrcVecTy->getElementType();
2496   Type *DstElemTy = DstVTy->getElementType();
2497   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2498          "Vector elements must have same size");
2499 
2500   // Do a direct cast if element types are castable.
2501   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2502     return Builder.CreateBitOrPointerCast(V, DstVTy);
2503   }
2504   // V cannot be cast directly to the desired vector type. This may happen
2505   // when V is a floating-point vector while DstVTy is a vector of pointers,
2506   // or vice versa. Handle this with a two-step cast through an intermediate
2507   // integer type, i.e. Ptr <-> Int <-> Float.
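  // For example (illustrative only, assuming 64-bit pointers): casting
  // <2 x double> to <2 x i8*> goes through <2 x i64>, i.e. a bitcast
  // followed by an inttoptr-style cast.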
2508   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2509          "Only one type should be a pointer type");
2510   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2511          "Only one type should be a floating point type");
2512   Type *IntTy =
2513       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2514   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2515   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2516   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2517 }
2518 
2519 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2520                                                          BasicBlock *Bypass) {
2521   Value *Count = getOrCreateTripCount(L);
2522   BasicBlock *BB = L->getLoopPreheader();
2523   IRBuilder<> Builder(BB->getTerminator());
2524 
2525   // Generate code to check if the loop's trip count is less than VF * UF, or
2526   // equal to it in case a scalar epilogue is required; this implies that the
2527   // vector trip count is zero. This check also covers the case where adding one
2528   // to the backedge-taken count overflowed leading to an incorrect trip count
2529   // of zero. In this case we will also jump to the scalar loop.
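  // For illustration only (the value names are made up): with VF * UF == 8
  // and a required scalar epilogue this emits roughly
  //   %min.iters.check = icmp ule i64 %trip.count, 8
  //   br i1 %min.iters.check, label %scalar.ph, label %vector.ph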
2530   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2531                                           : ICmpInst::ICMP_ULT;
2532 
2533   // If tail is to be folded, vector loop takes care of all iterations.
2534   Value *CheckMinIters = Builder.getFalse();
2535   if (!Cost->foldTailByMasking())
2536     CheckMinIters = Builder.CreateICmp(
2537         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2538         "min.iters.check");
2539 
2540   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2541   // Update dominator tree immediately if the generated block is a
2542   // LoopBypassBlock because SCEV expansions to generate loop bypass
2543   // checks may query it before the current function is finished.
2544   DT->addNewBlock(NewBB, BB);
2545   if (L->getParentLoop())
2546     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2547   ReplaceInstWithInst(BB->getTerminator(),
2548                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
2549   LoopBypassBlocks.push_back(BB);
2550 }
2551 
2552 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2553   BasicBlock *BB = L->getLoopPreheader();
2554 
2555   // Generate the code to check the SCEV assumptions that we made.
2556   // We want the new basic block to start at the first instruction in a
2557   // sequence of instructions that form a check.
2558   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2559                    "scev.check");
2560   Value *SCEVCheck =
2561       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2562 
2563   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2564     if (C->isZero())
2565       return;
2566 
2567   assert(!Cost->foldTailByMasking() &&
2568          "Cannot SCEV check stride or overflow when folding tail");
2569   // Create a new block containing the stride check.
2570   BB->setName("vector.scevcheck");
2571   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2572   // Update dominator tree immediately if the generated block is a
2573   // LoopBypassBlock because SCEV expansions to generate loop bypass
2574   // checks may query it before the current function is finished.
2575   DT->addNewBlock(NewBB, BB);
2576   if (L->getParentLoop())
2577     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2578   ReplaceInstWithInst(BB->getTerminator(),
2579                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
2580   LoopBypassBlocks.push_back(BB);
2581   AddedSafetyChecks = true;
2582 }
2583 
2584 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2585   // VPlan-native path does not do any analysis for runtime checks currently.
2586   if (EnableVPlanNativePath)
2587     return;
2588 
2589   BasicBlock *BB = L->getLoopPreheader();
2590 
2591   // Generate the code that checks at runtime whether arrays overlap. We put
2592   // the checks into a separate block to make the more common case of few
2593   // elements faster.
2594   Instruction *FirstCheckInst;
2595   Instruction *MemRuntimeCheck;
2596   std::tie(FirstCheckInst, MemRuntimeCheck) =
2597       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
2598   if (!MemRuntimeCheck)
2599     return;
2600 
2601   assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail");
2602   // Create a new block containing the memory check.
2603   BB->setName("vector.memcheck");
2604   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2605   // Update dominator tree immediately if the generated block is a
2606   // LoopBypassBlock because SCEV expansions to generate loop bypass
2607   // checks may query it before the current function is finished.
2608   DT->addNewBlock(NewBB, BB);
2609   if (L->getParentLoop())
2610     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2611   ReplaceInstWithInst(BB->getTerminator(),
2612                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
2613   LoopBypassBlocks.push_back(BB);
2614   AddedSafetyChecks = true;
2615 
2616   // We currently don't use LoopVersioning for the actual loop cloning but we
2617   // still use it to add the noalias metadata.
2618   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2619                                            PSE.getSE());
2620   LVer->prepareNoAliasMetadata();
2621 }
2622 
2623 Value *InnerLoopVectorizer::emitTransformedIndex(
2624     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2625     const InductionDescriptor &ID) const {
2626 
2627   SCEVExpander Exp(*SE, DL, "induction");
2628   auto Step = ID.getStep();
2629   auto StartValue = ID.getStartValue();
2630   assert(Index->getType() == Step->getType() &&
2631          "Index type does not match StepValue type");
2632 
2633   // Note: the IR at this point is broken. We cannot use SE to create any new
2634   // SCEV and then expand it, hoping that SCEV's simplification will give us
2635   // better code. Unfortunately, attempting to do so on invalid IR may lead to
2636   // various SCEV crashes. So all we can do is use the builder and rely on
2637   // InstCombine for future simplifications. Here we handle only some trivial
2638   // cases.
2639   auto CreateAdd = [&B](Value *X, Value *Y) {
2640     assert(X->getType() == Y->getType() && "Types don't match!");
2641     if (auto *CX = dyn_cast<ConstantInt>(X))
2642       if (CX->isZero())
2643         return Y;
2644     if (auto *CY = dyn_cast<ConstantInt>(Y))
2645       if (CY->isZero())
2646         return X;
2647     return B.CreateAdd(X, Y);
2648   };
2649 
2650   auto CreateMul = [&B](Value *X, Value *Y) {
2651     assert(X->getType() == Y->getType() && "Types don't match!");
2652     if (auto *CX = dyn_cast<ConstantInt>(X))
2653       if (CX->isOne())
2654         return Y;
2655     if (auto *CY = dyn_cast<ConstantInt>(Y))
2656       if (CY->isOne())
2657         return X;
2658     return B.CreateMul(X, Y);
2659   };
2660 
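  // As a sketch of the cases below (hypothetical values): an integer
  // induction with start %s and step %c maps Index to %s + Index * %c; the
  // helpers above fold the common step == 1 and start == 0 cases, and the
  // pointer and floating-point cases follow analogous formulas.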
2661   switch (ID.getKind()) {
2662   case InductionDescriptor::IK_IntInduction: {
2663     assert(Index->getType() == StartValue->getType() &&
2664            "Index type does not match StartValue type");
2665     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2666       return B.CreateSub(StartValue, Index);
2667     auto *Offset = CreateMul(
2668         Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
2669     return CreateAdd(StartValue, Offset);
2670   }
2671   case InductionDescriptor::IK_PtrInduction: {
2672     assert(isa<SCEVConstant>(Step) &&
2673            "Expected constant step for pointer induction");
2674     return B.CreateGEP(
2675         nullptr, StartValue,
2676         CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
2677                                            &*B.GetInsertPoint())));
2678   }
2679   case InductionDescriptor::IK_FpInduction: {
2680     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2681     auto InductionBinOp = ID.getInductionBinOp();
2682     assert(InductionBinOp &&
2683            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2684             InductionBinOp->getOpcode() == Instruction::FSub) &&
2685            "Original bin op should be defined for FP induction");
2686 
2687     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2688 
2689     // Floating point operations had to be 'fast' to enable the induction.
2690     FastMathFlags Flags;
2691     Flags.setFast();
2692 
2693     Value *MulExp = B.CreateFMul(StepValue, Index);
2694     if (isa<Instruction>(MulExp))
2695       // We have to check because MulExp may have been folded to a constant.
2696       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2697 
2698     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2699                                "induction");
2700     if (isa<Instruction>(BOp))
2701       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2702 
2703     return BOp;
2704   }
2705   case InductionDescriptor::IK_NoInduction:
2706     return nullptr;
2707   }
2708   llvm_unreachable("invalid enum");
2709 }
2710 
2711 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2712   /*
2713    In this function we generate a new loop. The new loop will contain
2714    the vectorized instructions while the old loop will continue to run the
2715    scalar remainder.
2716 
2717        [ ] <-- loop iteration number check.
2718     /   |
2719    /    v
2720   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2721   |  /  |
2722   | /   v
2723   ||   [ ]     <-- vector pre header.
2724   |/    |
2725   |     v
2726   |    [  ] \
2727   |    [  ]_|   <-- vector loop.
2728   |     |
2729   |     v
2730   |   -[ ]   <--- middle-block.
2731   |  /  |
2732   | /   v
2733   -|- >[ ]     <--- new preheader.
2734    |    |
2735    |    v
2736    |   [ ] \
2737    |   [ ]_|   <-- old scalar loop to handle remainder.
2738     \   |
2739      \  v
2740       >[ ]     <-- exit block.
2741    ...
2742    */
2743 
2744   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
2745   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2746   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
2747   MDNode *OrigLoopID = OrigLoop->getLoopID();
2748   assert(VectorPH && "Invalid loop structure");
2749   assert(ExitBlock && "Must have an exit block");
2750 
2751   // Some loops have a single integer induction variable, while other loops
2752   // don't. One example is loops over C++ iterators, which often have multiple
2753   // pointer induction variables. The code below also supports the case where
2754   // no single induction variable exists.
2755   //
2756   // We try as hard as possible to obtain an induction variable from the
2757   // original loop. However, if we don't find one that:
2758   //   - is an integer
2759   //   - counts from zero, stepping by one
2760   //   - is the size of the widest induction variable type
2761   // then we create a new one.
2762   OldInduction = Legal->getPrimaryInduction();
2763   Type *IdxTy = Legal->getWidestInductionType();
2764 
2765   // Split the single block loop into the two loop structure described above.
2766   BasicBlock *VecBody =
2767       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
2768   BasicBlock *MiddleBlock =
2769       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
2770   BasicBlock *ScalarPH =
2771       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
2772 
2773   // Create and register the new vector loop.
2774   Loop *Lp = LI->AllocateLoop();
2775   Loop *ParentLoop = OrigLoop->getParentLoop();
2776 
2777   // Insert the new loop into the loop nest and register the new basic blocks
2778   // before calling any utilities such as SCEV that require valid LoopInfo.
2779   if (ParentLoop) {
2780     ParentLoop->addChildLoop(Lp);
2781     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
2782     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
2783   } else {
2784     LI->addTopLevelLoop(Lp);
2785   }
2786   Lp->addBasicBlockToLoop(VecBody, *LI);
2787 
2788   // Find the loop boundaries.
2789   Value *Count = getOrCreateTripCount(Lp);
2790 
2791   Value *StartIdx = ConstantInt::get(IdxTy, 0);
2792 
2793   // Now, compare the new count to zero. If it is zero skip the vector loop and
2794   // jump to the scalar loop. This check also covers the case where the
2795   // backedge-taken count is uint##_max: adding one to it will overflow leading
2796   // to an incorrect trip count of zero. In this (rare) case we will also jump
2797   // to the scalar loop.
2798   emitMinimumIterationCountCheck(Lp, ScalarPH);
2799 
2800   // Generate the code to check any assumptions that we've made for SCEV
2801   // expressions.
2802   emitSCEVChecks(Lp, ScalarPH);
2803 
2804   // Generate the code that checks at runtime whether arrays overlap. We put
2805   // the checks into a separate block to make the more common case of few
2806   // elements faster.
2807   emitMemRuntimeChecks(Lp, ScalarPH);
2808 
2809   // Generate the induction variable.
2810   // The loop step is equal to the vectorization factor (num of SIMD elements)
2811   // times the unroll factor (num of SIMD instructions).
2812   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
2813   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
2814   Induction =
2815       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
2816                               getDebugLocFromInstOrOperands(OldInduction));
2817 
2818   // We are going to resume the execution of the scalar loop.
2819   // Go over all of the induction variables that we found and fix the
2820   // PHIs that are left in the scalar version of the loop.
2821   // The starting values of PHI nodes depend on the counter of the last
2822   // iteration in the vectorized loop.
2823   // If we come from a bypass edge then we need to start from the original
2824   // start value.
2825 
2826   // This variable saves the new starting index for the scalar loop. It is used
2827   // to test if there are any tail iterations left once the vector loop has
2828   // completed.
2829   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
2830   for (auto &InductionEntry : *List) {
2831     PHINode *OrigPhi = InductionEntry.first;
2832     InductionDescriptor II = InductionEntry.second;
2833 
2834     // Create phi nodes to merge from the backedge-taken check block.
2835     PHINode *BCResumeVal = PHINode::Create(
2836         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
2837     // Copy original phi DL over to the new one.
2838     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
2839     Value *&EndValue = IVEndValues[OrigPhi];
2840     if (OrigPhi == OldInduction) {
2841       // We know what the end value is.
2842       EndValue = CountRoundDown;
2843     } else {
2844       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
2845       Type *StepType = II.getStep()->getType();
2846       Instruction::CastOps CastOp =
2847         CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
2848       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
2849       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2850       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
2851       EndValue->setName("ind.end");
2852     }
2853 
2854     // The new PHI merges the original incoming value, in case of a bypass,
2855     // or the value at the end of the vectorized loop.
2856     BCResumeVal->addIncoming(EndValue, MiddleBlock);
2857 
2858     // Fix the scalar body counter (PHI node).
2859     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
2860 
2861     // The old induction's phi node in the scalar body needs the truncated
2862     // value.
2863     for (BasicBlock *BB : LoopBypassBlocks)
2864       BCResumeVal->addIncoming(II.getStartValue(), BB);
2865     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
2866   }
2867 
2868   // Add a check in the middle block to see if we have completed
2869   // all of the iterations in the first vector loop.
2870   // If (N - N%VF) == N, then we *don't* need to run the remainder.
2871   // If tail is to be folded, we know we don't need to run the remainder.
2872   Value *CmpN = Builder.getTrue();
2873   if (!Cost->foldTailByMasking())
2874     CmpN =
2875         CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
2876                         CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
2877   ReplaceInstWithInst(MiddleBlock->getTerminator(),
2878                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
2879 
2880   // Get ready to start creating new instructions into the vectorized body.
2881   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
2882 
2883   // Save the state.
2884   LoopVectorPreHeader = Lp->getLoopPreheader();
2885   LoopScalarPreHeader = ScalarPH;
2886   LoopMiddleBlock = MiddleBlock;
2887   LoopExitBlock = ExitBlock;
2888   LoopVectorBody = VecBody;
2889   LoopScalarBody = OldBasicBlock;
2890 
2891   Optional<MDNode *> VectorizedLoopID =
2892       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
2893                                       LLVMLoopVectorizeFollowupVectorized});
2894   if (VectorizedLoopID.hasValue()) {
2895     Lp->setLoopID(VectorizedLoopID.getValue());
2896 
2897     // Do not setAlreadyVectorized if loop attributes have been defined
2898     // explicitly.
2899     return LoopVectorPreHeader;
2900   }
2901 
2902   // Keep all loop hints from the original loop on the vector loop (we'll
2903   // replace the vectorizer-specific hints below).
2904   if (MDNode *LID = OrigLoop->getLoopID())
2905     Lp->setLoopID(LID);
2906 
2907   LoopVectorizeHints Hints(Lp, true, *ORE);
2908   Hints.setAlreadyVectorized();
2909 
2910   return LoopVectorPreHeader;
2911 }
2912 
2913 // Fix up external users of the induction variable. At this point, we are
2914 // in LCSSA form, with all external PHIs that use the IV having one input value,
2915 // coming from the remainder loop. We need those PHIs to also have a correct
2916 // value for the IV when arriving directly from the middle block.
2917 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
2918                                        const InductionDescriptor &II,
2919                                        Value *CountRoundDown, Value *EndValue,
2920                                        BasicBlock *MiddleBlock) {
2921   // There are two kinds of external IV usages - those that use the value
2922   // computed in the last iteration (the PHI) and those that use the penultimate
2923   // value (the value that feeds into the phi from the loop latch).
2924   // We allow both, but they obviously have different values.
2925 
2926   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
2927 
2928   DenseMap<Value *, Value *> MissingVals;
2929 
2930   // An external user of the last iteration's value should see the value that
2931   // the remainder loop uses to initialize its own IV.
2932   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
2933   for (User *U : PostInc->users()) {
2934     Instruction *UI = cast<Instruction>(U);
2935     if (!OrigLoop->contains(UI)) {
2936       assert(isa<PHINode>(UI) && "Expected LCSSA form");
2937       MissingVals[UI] = EndValue;
2938     }
2939   }
2940 
2941   // An external user of the penultimate value needs to see EndValue - Step.
2942   // The simplest way to get this is to recompute it from the constituent SCEVs,
2943   // that is Start + (Step * (CRD - 1)).
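  // As a concrete illustration (hypothetical IV): for a canonical induction
  // with Start == 0 and Step == 1, users of the phi itself see CRD - 1, while
  // users of the latch increment see CRD (the EndValue handled above).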
2944   for (User *U : OrigPhi->users()) {
2945     auto *UI = cast<Instruction>(U);
2946     if (!OrigLoop->contains(UI)) {
2947       const DataLayout &DL =
2948           OrigLoop->getHeader()->getModule()->getDataLayout();
2949       assert(isa<PHINode>(UI) && "Expected LCSSA form");
2950 
2951       IRBuilder<> B(MiddleBlock->getTerminator());
2952       Value *CountMinusOne = B.CreateSub(
2953           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
2954       Value *CMO =
2955           !II.getStep()->getType()->isIntegerTy()
2956               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
2957                              II.getStep()->getType())
2958               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
2959       CMO->setName("cast.cmo");
2960       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
2961       Escape->setName("ind.escape");
2962       MissingVals[UI] = Escape;
2963     }
2964   }
2965 
2966   for (auto &I : MissingVals) {
2967     PHINode *PHI = cast<PHINode>(I.first);
2968     // One corner case we have to handle is two IVs "chasing" each other,
2969     // that is %IV2 = phi [...], [ %IV1, %latch ]
2970     // In this case, if IV1 has an external use, we need to avoid adding both
2971     // "last value of IV1" and "penultimate value of IV2". So, verify that we
2972     // don't already have an incoming value for the middle block.
2973     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
2974       PHI->addIncoming(I.second, MiddleBlock);
2975   }
2976 }
2977 
2978 namespace {
2979 
2980 struct CSEDenseMapInfo {
2981   static bool canHandle(const Instruction *I) {
2982     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
2983            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
2984   }
2985 
2986   static inline Instruction *getEmptyKey() {
2987     return DenseMapInfo<Instruction *>::getEmptyKey();
2988   }
2989 
2990   static inline Instruction *getTombstoneKey() {
2991     return DenseMapInfo<Instruction *>::getTombstoneKey();
2992   }
2993 
2994   static unsigned getHashValue(const Instruction *I) {
2995     assert(canHandle(I) && "Unknown instruction!");
2996     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
2997                                                            I->value_op_end()));
2998   }
2999 
3000   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3001     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3002         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3003       return LHS == RHS;
3004     return LHS->isIdenticalTo(RHS);
3005   }
3006 };
3007 
3008 } // end anonymous namespace
3009 
3010 /// Perform CSE of induction variable instructions.
3011 static void cse(BasicBlock *BB) {
3012   // Perform simple CSE.
3013   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3014   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3015     Instruction *In = &*I++;
3016 
3017     if (!CSEDenseMapInfo::canHandle(In))
3018       continue;
3019 
3020     // Check if we can replace this instruction with any of the
3021     // visited instructions.
3022     if (Instruction *V = CSEMap.lookup(In)) {
3023       In->replaceAllUsesWith(V);
3024       In->eraseFromParent();
3025       continue;
3026     }
3027 
3028     CSEMap[In] = In;
3029   }
3030 }
3031 
3032 /// Estimate the overhead of scalarizing an instruction. This is a
3033 /// convenience wrapper for the type-based getScalarizationOverhead API.
3034 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3035                                          const TargetTransformInfo &TTI) {
3036   if (VF == 1)
3037     return 0;
3038 
3039   unsigned Cost = 0;
3040   Type *RetTy = ToVectorTy(I->getType(), VF);
3041   if (!RetTy->isVoidTy() &&
3042       (!isa<LoadInst>(I) ||
3043        !TTI.supportsEfficientVectorElementLoadStore()))
3044     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3045 
3046   // Some targets keep addresses scalar.
3047   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
3048     return Cost;
3049 
3050   if (CallInst *CI = dyn_cast<CallInst>(I)) {
3051     SmallVector<const Value *, 4> Operands(CI->arg_operands());
3052     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3053   } else if (!isa<StoreInst>(I) ||
3054              !TTI.supportsEfficientVectorElementLoadStore()) {
3056     SmallVector<const Value *, 4> Operands(I->operand_values());
3057     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3058   }
3059 
3060   return Cost;
3061 }
3062 
3063 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3064 // Return the cost of the instruction, including scalarization overhead if it's
3065 // needed. The flag NeedToScalarize shows whether the call needs to be scalarized,
3066 // i.e. either a vector version isn't available or it is too expensive.
3067 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3068                                   const TargetTransformInfo &TTI,
3069                                   const TargetLibraryInfo *TLI,
3070                                   bool &NeedToScalarize) {
3071   Function *F = CI->getCalledFunction();
3072   StringRef FnName = CI->getCalledFunction()->getName();
3073   Type *ScalarRetTy = CI->getType();
3074   SmallVector<Type *, 4> Tys, ScalarTys;
3075   for (auto &ArgOp : CI->arg_operands())
3076     ScalarTys.push_back(ArgOp->getType());
3077 
3078   // Estimate cost of scalarized vector call. The source operands are assumed
3079   // to be vectors, so we need to extract individual elements from there,
3080   // execute VF scalar calls, and then gather the result into the vector return
3081   // value.
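// Illustrative arithmetic only (made-up numbers): with VF == 4 and a scalar
// call cost of 10, the scalarized estimate below is 4 * 10 plus the
// extract/insert overhead from getScalarizationOverhead.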
3082   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3083   if (VF == 1)
3084     return ScalarCallCost;
3085 
3086   // Compute corresponding vector type for return value and arguments.
3087   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3088   for (Type *ScalarTy : ScalarTys)
3089     Tys.push_back(ToVectorTy(ScalarTy, VF));
3090 
3091   // Compute costs of unpacking argument values for the scalar calls and
3092   // packing the return values to a vector.
3093   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3094 
3095   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3096 
3097   // If we can't emit a vector call for this function, then the currently found
3098   // cost is the cost we need to return.
3099   NeedToScalarize = true;
3100   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3101     return Cost;
3102 
3103   // If the corresponding vector cost is cheaper, return its cost.
3104   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3105   if (VectorCallCost < Cost) {
3106     NeedToScalarize = false;
3107     return VectorCallCost;
3108   }
3109   return Cost;
3110 }
3111 
3112 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3113 // factor VF.  Return the cost of the instruction, including scalarization
3114 // overhead if it's needed.
3115 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3116                                        const TargetTransformInfo &TTI,
3117                                        const TargetLibraryInfo *TLI) {
3118   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3119   assert(ID && "Expected intrinsic call!");
3120 
3121   FastMathFlags FMF;
3122   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3123     FMF = FPMO->getFastMathFlags();
3124 
3125   SmallVector<Value *, 4> Operands(CI->arg_operands());
3126   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3127 }
3128 
3129 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3130   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3131   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3132   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3133 }
3134 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3135   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3136   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3137   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3138 }
3139 
3140 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3141   // For every instruction `I` in MinBWs, truncate the operands, create a
3142   // truncated version of `I` and reextend its result. InstCombine runs
3143   // later and will remove any ext/trunc pairs.
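  // For example (a sketch, not tied to a particular target): an i32 add whose
  // MinBWs entry says 8 bits are sufficient becomes a trunc of both operands
  // to <VF x i8>, an i8 add, and a zext of the result back to <VF x i32>; the
  // redundant ext/trunc pairs are cleaned up later.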
3144   SmallPtrSet<Value *, 4> Erased;
3145   for (const auto &KV : Cost->getMinimalBitwidths()) {
3146     // If the value wasn't vectorized, we must maintain the original scalar
3147     // type. The absence of the value from VectorLoopValueMap indicates that it
3148     // wasn't vectorized.
3149     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3150       continue;
3151     for (unsigned Part = 0; Part < UF; ++Part) {
3152       Value *I = getOrCreateVectorValue(KV.first, Part);
3153       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3154           !isa<Instruction>(I))
3155         continue;
3156       Type *OriginalTy = I->getType();
3157       Type *ScalarTruncatedTy =
3158           IntegerType::get(OriginalTy->getContext(), KV.second);
3159       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3160                                           OriginalTy->getVectorNumElements());
3161       if (TruncatedTy == OriginalTy)
3162         continue;
3163 
3164       IRBuilder<> B(cast<Instruction>(I));
3165       auto ShrinkOperand = [&](Value *V) -> Value * {
3166         if (auto *ZI = dyn_cast<ZExtInst>(V))
3167           if (ZI->getSrcTy() == TruncatedTy)
3168             return ZI->getOperand(0);
3169         return B.CreateZExtOrTrunc(V, TruncatedTy);
3170       };
3171 
3172       // The actual instruction modification depends on the instruction type,
3173       // unfortunately.
3174       Value *NewI = nullptr;
3175       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3176         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3177                              ShrinkOperand(BO->getOperand(1)));
3178 
3179         // Any wrapping introduced by shrinking this operation shouldn't be
3180         // considered undefined behavior. So, we can't unconditionally copy
3181         // arithmetic wrapping flags to NewI.
3182         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3183       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3184         NewI =
3185             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3186                          ShrinkOperand(CI->getOperand(1)));
3187       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3188         NewI = B.CreateSelect(SI->getCondition(),
3189                               ShrinkOperand(SI->getTrueValue()),
3190                               ShrinkOperand(SI->getFalseValue()));
3191       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3192         switch (CI->getOpcode()) {
3193         default:
3194           llvm_unreachable("Unhandled cast!");
3195         case Instruction::Trunc:
3196           NewI = ShrinkOperand(CI->getOperand(0));
3197           break;
3198         case Instruction::SExt:
3199           NewI = B.CreateSExtOrTrunc(
3200               CI->getOperand(0),
3201               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3202           break;
3203         case Instruction::ZExt:
3204           NewI = B.CreateZExtOrTrunc(
3205               CI->getOperand(0),
3206               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3207           break;
3208         }
3209       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3210         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3211         auto *O0 = B.CreateZExtOrTrunc(
3212             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3213         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3214         auto *O1 = B.CreateZExtOrTrunc(
3215             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3216 
3217         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3218       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3219         // Don't do anything with the operands, just extend the result.
3220         continue;
3221       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3222         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3223         auto *O0 = B.CreateZExtOrTrunc(
3224             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3225         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3226         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3227       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3228         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3229         auto *O0 = B.CreateZExtOrTrunc(
3230             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3231         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3232       } else {
3233         // If we don't know what to do, be conservative and don't do anything.
3234         continue;
3235       }
3236 
3237       // Lastly, extend the result.
3238       NewI->takeName(cast<Instruction>(I));
3239       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3240       I->replaceAllUsesWith(Res);
3241       cast<Instruction>(I)->eraseFromParent();
3242       Erased.insert(I);
3243       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3244     }
3245   }
3246 
3247   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3248   for (const auto &KV : Cost->getMinimalBitwidths()) {
3249     // If the value wasn't vectorized, we must maintain the original scalar
3250     // type. The absence of the value from VectorLoopValueMap indicates that it
3251     // wasn't vectorized.
3252     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3253       continue;
3254     for (unsigned Part = 0; Part < UF; ++Part) {
3255       Value *I = getOrCreateVectorValue(KV.first, Part);
3256       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3257       if (Inst && Inst->use_empty()) {
3258         Value *NewI = Inst->getOperand(0);
3259         Inst->eraseFromParent();
3260         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3261       }
3262     }
3263   }
3264 }
3265 
3266 void InnerLoopVectorizer::fixVectorizedLoop() {
3267   // Insert truncates and extends for any truncated instructions as hints to
3268   // InstCombine.
3269   if (VF > 1)
3270     truncateToMinimalBitwidths();
3271 
3272   // Fix widened non-induction PHIs by setting up the PHI operands.
3273   if (OrigPHIsToFix.size()) {
3274     assert(EnableVPlanNativePath &&
3275            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3276     fixNonInductionPHIs();
3277   }
3278 
3279   // At this point every instruction in the original loop is widened to a
3280   // vector form. Now we need to fix the recurrences in the loop. These PHI
3281   // nodes are currently empty because we did not want to introduce cycles.
3282   // This is the second stage of vectorizing recurrences.
3283   fixCrossIterationPHIs();
3284 
3285   // Update the dominator tree.
3286   //
3287   // FIXME: After creating the structure of the new loop, the dominator tree is
3288   //        no longer up-to-date, and it remains that way until we update it
3289   //        here. An out-of-date dominator tree is problematic for SCEV,
3290   //        because SCEVExpander uses it to guide code generation. The
3291   //        vectorizer use SCEVExpanders in several places. Instead, we should
3292   //        keep the dominator tree up-to-date as we go.
3293   updateAnalysis();
3294 
3295   // Fix-up external users of the induction variables.
3296   for (auto &Entry : *Legal->getInductionVars())
3297     fixupIVUsers(Entry.first, Entry.second,
3298                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3299                  IVEndValues[Entry.first], LoopMiddleBlock);
3300 
3301   fixLCSSAPHIs();
3302   for (Instruction *PI : PredicatedInstructions)
3303     sinkScalarOperands(&*PI);
3304 
3305   // Remove redundant induction instructions.
3306   cse(LoopVectorBody);
3307 }
3308 
3309 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3310   // In order to support recurrences we need to be able to vectorize Phi nodes.
3311   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3312   // stage #2: We now need to fix the recurrences by adding incoming edges to
3313   // the currently empty PHI nodes. At this point every instruction in the
3314   // original loop is widened to a vector form so we can use them to construct
3315   // the incoming edges.
3316   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3317     // Handle first-order recurrences and reductions that need to be fixed.
3318     if (Legal->isFirstOrderRecurrence(&Phi))
3319       fixFirstOrderRecurrence(&Phi);
3320     else if (Legal->isReductionVariable(&Phi))
3321       fixReduction(&Phi);
3322   }
3323 }
3324 
3325 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3326   // This is the second phase of vectorizing first-order recurrences. An
3327   // overview of the transformation is described below. Suppose we have the
3328   // following loop.
3329   //
3330   //   for (int i = 0; i < n; ++i)
3331   //     b[i] = a[i] - a[i - 1];
3332   //
3333   // There is a first-order recurrence on "a". For this loop, the shorthand
3334   // scalar IR looks like:
3335   //
3336   //   scalar.ph:
3337   //     s_init = a[-1]
3338   //     br scalar.body
3339   //
3340   //   scalar.body:
3341   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3342   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3343   //     s2 = a[i]
3344   //     b[i] = s2 - s1
3345   //     br cond, scalar.body, ...
3346   //
3347   // In this example, s1 is a recurrence because its value depends on the
3348   // previous iteration. In the first phase of vectorization, we created a
3349   // temporary value for s1. We now complete the vectorization and produce the
3350   // shorthand vector IR shown below (for VF = 4, UF = 1).
3351   //
3352   //   vector.ph:
3353   //     v_init = vector(..., ..., ..., a[-1])
3354   //     br vector.body
3355   //
3356   //   vector.body
3357   //     i = phi [0, vector.ph], [i+4, vector.body]
3358   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3359   //     v2 = a[i, i+1, i+2, i+3];
3360   //     v3 = vector(v1(3), v2(0, 1, 2))
3361   //     b[i, i+1, i+2, i+3] = v2 - v3
3362   //     br cond, vector.body, middle.block
3363   //
3364   //   middle.block:
3365   //     x = v2(3)
3366   //     br scalar.ph
3367   //
3368   //   scalar.ph:
3369   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3370   //     br scalar.body
3371   //
3372   // After execution completes the vector loop, we extract the next value of
3373   // the recurrence (x) to use as the initial value in the scalar loop.
3374 
3375   // Get the original loop preheader and single loop latch.
3376   auto *Preheader = OrigLoop->getLoopPreheader();
3377   auto *Latch = OrigLoop->getLoopLatch();
3378 
3379   // Get the initial and previous values of the scalar recurrence.
3380   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3381   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3382 
3383   // Create a vector from the initial value.
3384   auto *VectorInit = ScalarInit;
3385   if (VF > 1) {
3386     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3387     VectorInit = Builder.CreateInsertElement(
3388         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3389         Builder.getInt32(VF - 1), "vector.recur.init");
3390   }
3391 
3392   // We constructed a temporary phi node in the first phase of vectorization.
3393   // This phi node will eventually be deleted.
3394   Builder.SetInsertPoint(
3395       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3396 
3397   // Create a phi node for the new recurrence. The current value will either be
3398   // the initial value inserted into a vector or loop-varying vector value.
3399   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3400   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3401 
3402   // Get the vectorized previous value of the last part UF - 1. It appears last
3403   // among all unrolled iterations, due to the order of their construction.
3404   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3405 
3406   // Set the insertion point after the previous value if it is an instruction.
3407   // Note that the previous value may have been constant-folded so it is not
3408   // guaranteed to be an instruction in the vector loop. Also, if the previous
3409   // value is a phi node, we should insert after all the phi nodes to avoid
3410   // breaking basic block verification.
3411   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
3412       isa<PHINode>(PreviousLastPart))
3413     Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3414   else
3415     Builder.SetInsertPoint(
3416         &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
3417 
3418   // We will construct a vector for the recurrence by combining the values for
3419   // the current and previous iterations. This is the required shuffle mask.
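  // For instance, with VF == 4 the mask built below is <3, 4, 5, 6>: the last
  // element of the previous (incoming) vector followed by the first three
  // elements of the current one, matching the v3 construction in the example
  // above.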
3420   SmallVector<Constant *, 8> ShuffleMask(VF);
3421   ShuffleMask[0] = Builder.getInt32(VF - 1);
3422   for (unsigned I = 1; I < VF; ++I)
3423     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
3424 
3425   // The vector from which to take the initial value for the current iteration
3426   // (actual or unrolled). Initially, this is the vector phi node.
3427   Value *Incoming = VecPhi;
3428 
3429   // Shuffle the current and previous vector and update the vector parts.
3430   for (unsigned Part = 0; Part < UF; ++Part) {
3431     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3432     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3433     auto *Shuffle =
3434         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3435                                              ConstantVector::get(ShuffleMask))
3436                : Incoming;
3437     PhiPart->replaceAllUsesWith(Shuffle);
3438     cast<Instruction>(PhiPart)->eraseFromParent();
3439     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3440     Incoming = PreviousPart;
3441   }
3442 
3443   // Fix the latch value of the new recurrence in the vector loop.
3444   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3445 
3446   // Extract the last vector element in the middle block. This will be the
3447   // initial value for the recurrence when jumping to the scalar loop.
3448   auto *ExtractForScalar = Incoming;
3449   if (VF > 1) {
3450     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3451     ExtractForScalar = Builder.CreateExtractElement(
3452         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3453   }
3454   // Extract the second-to-last element in the middle block if the
3455   // Phi is used outside the loop. We need to extract the phi itself
3456   // and not the last element (the phi update in the current iteration). This
3457   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3458   // when the scalar loop is not run at all.
3459   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3460   if (VF > 1)
3461     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3462         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3463   // When the loop is unrolled without vectorizing, initialize
3464   // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
3465   // value of `Incoming`. This is analogous to the vectorized case above:
3466   // extracting the second-to-last element when VF > 1.
3467   else if (UF > 1)
3468     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3469 
3470   // Fix the initial value of the original recurrence in the scalar loop.
3471   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3472   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3473   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3474     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3475     Start->addIncoming(Incoming, BB);
3476   }
3477 
3478   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3479   Phi->setName("scalar.recur");
3480 
3481   // Finally, fix users of the recurrence outside the loop. The users will need
3482   // either the last value of the scalar recurrence or the last value of the
3483   // vector recurrence we extracted in the middle block. Since the loop is in
3484   // LCSSA form, we just need to find all the phi nodes for the original scalar
3485   // recurrence in the exit block, and then add an edge for the middle block.
3486   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3487     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3488       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3489     }
3490   }
3491 }
3492 
3493 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3494   Constant *Zero = Builder.getInt32(0);
3495 
3496   // Get its reduction variable descriptor.
3497   assert(Legal->isReductionVariable(Phi) &&
3498          "Unable to find the reduction variable");
3499   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3500 
3501   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3502   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3503   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3504   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3505     RdxDesc.getMinMaxRecurrenceKind();
3506   setDebugLocFromInst(Builder, ReductionStartValue);
3507 
3508   // We need to generate a reduction vector from the incoming scalar.
3509   // To do so, we need to generate the 'identity' vector and override
3510   // one of the elements with the incoming scalar reduction. We need
3511   // to do it in the vector-loop preheader.
3512   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3513 
3514   // This is the vector-clone of the value that leaves the loop.
3515   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3516 
3517   // Find the reduction identity value: zero for addition, or and xor;
3518   // one for multiplication; -1 for and.
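  // For instance (hypothetical values): an integer add reduction with VF == 4
  // and scalar start value %s uses Identity == <0, 0, 0, 0> and
  // VectorStart == <%s, 0, 0, 0>.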
3519   Value *Identity;
3520   Value *VectorStart;
3521   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3522       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3523     // MinMax reductions have the start value as their identity.
3524     if (VF == 1) {
3525       VectorStart = Identity = ReductionStartValue;
3526     } else {
3527       VectorStart = Identity =
3528         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3529     }
3530   } else {
3531     // Handle other reduction kinds:
3532     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3533         RK, VecTy->getScalarType());
3534     if (VF == 1) {
3535       Identity = Iden;
3536       // This vector is the Identity vector where the first element is the
3537       // incoming scalar reduction.
3538       VectorStart = ReductionStartValue;
3539     } else {
3540       Identity = ConstantVector::getSplat(VF, Iden);
3541 
3542       // This vector is the Identity vector where the first element is the
3543       // incoming scalar reduction.
3544       VectorStart =
3545         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3546     }
3547   }
3548 
3549   // Fix the vector-loop phi.
3550 
3551   // Reductions do not have to start at zero. They can start with
3552   // any loop invariant values.
3553   BasicBlock *Latch = OrigLoop->getLoopLatch();
3554   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3555   for (unsigned Part = 0; Part < UF; ++Part) {
3556     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3557     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3558     // Make sure to add the reduction start value only to the
3559     // first unroll part.
3560     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3561     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3562     cast<PHINode>(VecRdxPhi)
3563       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3564   }
3565 
3566   // Before each round, move the insertion point right between
3567   // the PHIs and the values we are going to write.
3568   // This allows us to write both PHINodes and the extractelement
3569   // instructions.
3570   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3571 
3572   setDebugLocFromInst(Builder, LoopExitInst);
3573 
3574   // If the vector reduction can be performed in a smaller type, we truncate
3575   // then extend the loop exit value to enable InstCombine to evaluate the
3576   // entire expression in the smaller type.
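  // For example (illustrative widths): an i32 add reduction whose values are
  // known to fit in i8 is truncated to <VF x i8> here, and the final reduced
  // value is extended back to i32 after createTargetReduction below.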
3577   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3578     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3579     Builder.SetInsertPoint(
3580         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3581     VectorParts RdxParts(UF);
3582     for (unsigned Part = 0; Part < UF; ++Part) {
3583       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3584       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3585       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3586                                         : Builder.CreateZExt(Trunc, VecTy);
3587       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3588            UI != RdxParts[Part]->user_end();)
3589         if (*UI != Trunc) {
3590           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3591           RdxParts[Part] = Extnd;
3592         } else {
3593           ++UI;
3594         }
3595     }
3596     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3597     for (unsigned Part = 0; Part < UF; ++Part) {
3598       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3599       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3600     }
3601   }
3602 
3603   // Reduce all of the unrolled parts into a single vector.
3604   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3605   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3606   setDebugLocFromInst(Builder, ReducedPartRdx);
3607   for (unsigned Part = 1; Part < UF; ++Part) {
3608     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3609     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3610       // Floating point operations had to be 'fast' to enable the reduction.
3611       ReducedPartRdx = addFastMathFlag(
3612           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3613                               ReducedPartRdx, "bin.rdx"));
3614     else
3615       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3616                                       RdxPart);
3617   }
3618 
3619   if (VF > 1) {
3620     bool NoNaN = Legal->hasFunNoNaNAttr();
3621     ReducedPartRdx =
3622         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3623     // If the reduction can be performed in a smaller type, we need to extend
3624     // the reduction to the wider type before we branch to the original loop.
3625     if (Phi->getType() != RdxDesc.getRecurrenceType())
3626       ReducedPartRdx =
3627         RdxDesc.isSigned()
3628         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3629         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3630   }
3631 
3632   // Create a phi node that merges control-flow from the backedge-taken check
3633   // block and the middle block.
3634   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3635                                         LoopScalarPreHeader->getTerminator());
3636   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3637     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3638   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3639 
3640   // Now, we need to fix the users of the reduction variable
3641   // inside and outside of the scalar remainder loop.
3642   // We know that the loop is in LCSSA form. We need to update the
3643   // PHI nodes in the exit blocks.
3644   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3645     // All PHINodes need to have a single entry edge, or two if
3646     // we already fixed them.
3647     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3648 
3649     // We found a reduction value exit-PHI. Update it with the
3650     // incoming bypass edge.
3651     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3652       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3653   } // end of the LCSSA phi scan.
3654 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
3657   int IncomingEdgeBlockIdx =
3658     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3659   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3660   // Pick the other block.
3661   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3662   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3663   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3664 }
3665 
3666 void InnerLoopVectorizer::fixLCSSAPHIs() {
3667   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3668     if (LCSSAPhi.getNumIncomingValues() == 1) {
3669       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
      // Non-instruction incoming values are uniform, so only lane zero is needed.
3671       unsigned LastLane = 0;
3672       if (isa<Instruction>(IncomingValue))
3673           LastLane = Cost->isUniformAfterVectorization(
3674                          cast<Instruction>(IncomingValue), VF)
3675                          ? 0
3676                          : VF - 1;
3677       // Can be a loop invariant incoming value or the last scalar value to be
3678       // extracted from the vectorized loop.
3679       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3680       Value *lastIncomingValue =
3681           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3682       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3683     }
3684   }
3685 }
3686 
3687 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3688   // The basic block and loop containing the predicated instruction.
3689   auto *PredBB = PredInst->getParent();
3690   auto *VectorLoop = LI->getLoopFor(PredBB);
3691 
3692   // Initialize a worklist with the operands of the predicated instruction.
3693   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3694 
3695   // Holds instructions that we need to analyze again. An instruction may be
3696   // reanalyzed if we don't yet know if we can sink it or not.
3697   SmallVector<Instruction *, 8> InstsToReanalyze;
3698 
3699   // Returns true if a given use occurs in the predicated block. Phi nodes use
3700   // their operands in their corresponding predecessor blocks.
3701   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3702     auto *I = cast<Instruction>(U.getUser());
3703     BasicBlock *BB = I->getParent();
3704     if (auto *Phi = dyn_cast<PHINode>(I))
3705       BB = Phi->getIncomingBlock(
3706           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3707     return BB == PredBB;
3708   };
3709 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends once a pass
  // through the worklist fails to sink a single instruction.
3714   bool Changed;
3715   do {
3716     // Add the instructions that need to be reanalyzed to the worklist, and
3717     // reset the changed indicator.
3718     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
3719     InstsToReanalyze.clear();
3720     Changed = false;
3721 
3722     while (!Worklist.empty()) {
3723       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
3724 
3725       // We can't sink an instruction if it is a phi node, is already in the
3726       // predicated block, is not in the loop, or may have side effects.
3727       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
3728           !VectorLoop->contains(I) || I->mayHaveSideEffects())
3729         continue;
3730 
3731       // It's legal to sink the instruction if all its uses occur in the
3732       // predicated block. Otherwise, there's nothing to do yet, and we may
3733       // need to reanalyze the instruction.
3734       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
3735         InstsToReanalyze.push_back(I);
3736         continue;
3737       }
3738 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
3741       I->moveBefore(&*PredBB->getFirstInsertionPt());
3742       Worklist.insert(I->op_begin(), I->op_end());
3743 
3744       // The sinking may have enabled other instructions to be sunk, so we will
3745       // need to iterate.
3746       Changed = true;
3747     }
3748   } while (Changed);
3749 }
3750 
3751 void InnerLoopVectorizer::fixNonInductionPHIs() {
3752   for (PHINode *OrigPhi : OrigPHIsToFix) {
3753     PHINode *NewPhi =
3754         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
3755     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
3756 
3757     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
3758         predecessors(OrigPhi->getParent()));
3759     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
3760         predecessors(NewPhi->getParent()));
3761     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
3762            "Scalar and Vector BB should have the same number of predecessors");
3763 
3764     // The insertion point in Builder may be invalidated by the time we get
3765     // here. Force the Builder insertion point to something valid so that we do
3766     // not run into issues during insertion point restore in
3767     // getOrCreateVectorValue calls below.
3768     Builder.SetInsertPoint(NewPhi);
3769 
3770     // The predecessor order is preserved and we can rely on mapping between
3771     // scalar and vector block predecessors.
3772     for (unsigned i = 0; i < NumIncomingValues; ++i) {
3773       BasicBlock *NewPredBB = VectorBBPredecessors[i];
3774 
3775       // When looking up the new scalar/vector values to fix up, use incoming
3776       // values from original phi.
3777       Value *ScIncV =
3778           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
3779 
      // The scalar incoming value may need a broadcast.
3781       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
3782       NewPhi->addIncoming(NewIncV, NewPredBB);
3783     }
3784   }
3785 }
3786 
3787 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
3788                                               unsigned VF) {
3789   PHINode *P = cast<PHINode>(PN);
3790   if (EnableVPlanNativePath) {
3791     // Currently we enter here in the VPlan-native path for non-induction
3792     // PHIs where all control flow is uniform. We simply widen these PHIs.
3793     // Create a vector phi with no operands - the vector phi operands will be
3794     // set at the end of vector code generation.
3795     Type *VecTy =
3796         (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
3797     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
3798     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
3799     OrigPHIsToFix.push_back(P);
3800 
3801     return;
3802   }
3803 
3804   assert(PN->getParent() == OrigLoop->getHeader() &&
3805          "Non-header phis should have been handled elsewhere");
3806 
3807   // In order to support recurrences we need to be able to vectorize Phi nodes.
3808   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3809   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
3810   // this value when we vectorize all of the instructions that use the PHI.
3811   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
3812     for (unsigned Part = 0; Part < UF; ++Part) {
3813       // This is phase one of vectorizing PHIs.
3814       Type *VecTy =
3815           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
3816       Value *EntryPart = PHINode::Create(
3817           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
3818       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
3819     }
3820     return;
3821   }
3822 
3823   setDebugLocFromInst(Builder, P);
3824 
3825   // This PHINode must be an induction variable.
3826   // Make sure that we know about it.
3827   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
3828 
3829   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
3830   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3831 
3832   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
3833   // which can be found from the original scalar operations.
3834   switch (II.getKind()) {
3835   case InductionDescriptor::IK_NoInduction:
3836     llvm_unreachable("Unknown induction");
3837   case InductionDescriptor::IK_IntInduction:
3838   case InductionDescriptor::IK_FpInduction:
3839     llvm_unreachable("Integer/fp induction is handled elsewhere.");
3840   case InductionDescriptor::IK_PtrInduction: {
3841     // Handle the pointer induction variable case.
3842     assert(P->getType()->isPointerTy() && "Unexpected type.");
3843     // This is the normalized GEP that starts counting at zero.
3844     Value *PtrInd = Induction;
3845     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
3846     // Determine the number of scalars we need to generate for each unroll
3847     // iteration. If the instruction is uniform, we only need to generate the
3848     // first lane. Otherwise, we generate all VF values.
3849     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
3850     // These are the scalar results. Notice that we don't generate vector GEPs
3851     // because scalar GEPs result in better code.
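    // For example, with UF = 2, VF = 4, and a non-uniform pointer, this emits
    // eight scalar "next.gep" values, one for each of the offsets
    // Induction + 0 .. Induction + 7.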
3852     for (unsigned Part = 0; Part < UF; ++Part) {
3853       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3854         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
3855         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
3856         Value *SclrGep =
3857             emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
3858         SclrGep->setName("next.gep");
3859         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
3860       }
3861     }
3862     return;
3863   }
3864   }
3865 }
3866 
3867 /// A helper function for checking whether an integer division-related
3868 /// instruction may divide by zero (in which case it must be predicated if
3869 /// executed conditionally in the scalar code).
3870 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
3872 /// converted into multiplication, so we will still end up scalarizing
3873 /// the division, but can do so w/o predication.
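/// For example, a conditional "a[i] / b[i]" whose divisor is not a
/// compile-time constant is conservatively treated as possibly dividing by
/// zero, whereas "a[i] / 7" is not.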
3874 static bool mayDivideByZero(Instruction &I) {
3875   assert((I.getOpcode() == Instruction::UDiv ||
3876           I.getOpcode() == Instruction::SDiv ||
3877           I.getOpcode() == Instruction::URem ||
3878           I.getOpcode() == Instruction::SRem) &&
3879          "Unexpected instruction");
3880   Value *Divisor = I.getOperand(1);
3881   auto *CInt = dyn_cast<ConstantInt>(Divisor);
3882   return !CInt || CInt->isZero();
3883 }
3884 
3885 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
3886   switch (I.getOpcode()) {
3887   case Instruction::Br:
3888   case Instruction::PHI:
3889     llvm_unreachable("This instruction is handled by a different recipe.");
3890   case Instruction::GetElementPtr: {
3891     // Construct a vector GEP by widening the operands of the scalar GEP as
3892     // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
3893     // results in a vector of pointers when at least one operand of the GEP
3894     // is vector-typed. Thus, to keep the representation compact, we only use
3895     // vector-typed operands for loop-varying values.
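    // For instance (illustrative IR), a scalar "getelementptr i32, i32* %p,
    // i64 %idx" with a loop-varying index is widened at VF = 4 into
    // "getelementptr i32, i32* %p, <4 x i64> %vec.idx", which produces a
    // <4 x i32*> vector of pointers.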
3896     auto *GEP = cast<GetElementPtrInst>(&I);
3897 
3898     if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
3899       // If we are vectorizing, but the GEP has only loop-invariant operands,
3900       // the GEP we build (by only using vector-typed operands for
3901       // loop-varying values) would be a scalar pointer. Thus, to ensure we
3902       // produce a vector of pointers, we need to either arbitrarily pick an
3903       // operand to broadcast, or broadcast a clone of the original GEP.
3904       // Here, we broadcast a clone of the original.
3905       //
3906       // TODO: If at some point we decide to scalarize instructions having
3907       //       loop-invariant operands, this special case will no longer be
3908       //       required. We would add the scalarization decision to
3909       //       collectLoopScalars() and teach getVectorValue() to broadcast
3910       //       the lane-zero scalar value.
3911       auto *Clone = Builder.Insert(GEP->clone());
3912       for (unsigned Part = 0; Part < UF; ++Part) {
3913         Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
3914         VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
3915         addMetadata(EntryPart, GEP);
3916       }
3917     } else {
3918       // If the GEP has at least one loop-varying operand, we are sure to
3919       // produce a vector of pointers. But if we are only unrolling, we want
3920       // to produce a scalar GEP for each unroll part. Thus, the GEP we
3921       // produce with the code below will be scalar (if VF == 1) or vector
3922       // (otherwise). Note that for the unroll-only case, we still maintain
3923       // values in the vector mapping with initVector, as we do for other
3924       // instructions.
3925       for (unsigned Part = 0; Part < UF; ++Part) {
3926         // The pointer operand of the new GEP. If it's loop-invariant, we
3927         // won't broadcast it.
3928         auto *Ptr =
3929             OrigLoop->isLoopInvariant(GEP->getPointerOperand())
3930                 ? GEP->getPointerOperand()
3931                 : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
3932 
3933         // Collect all the indices for the new GEP. If any index is
3934         // loop-invariant, we won't broadcast it.
3935         SmallVector<Value *, 4> Indices;
3936         for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
3937           if (OrigLoop->isLoopInvariant(U.get()))
3938             Indices.push_back(U.get());
3939           else
3940             Indices.push_back(getOrCreateVectorValue(U.get(), Part));
3941         }
3942 
3943         // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
3944         // but it should be a vector, otherwise.
3945         auto *NewGEP = GEP->isInBounds()
3946                            ? Builder.CreateInBoundsGEP(Ptr, Indices)
3947                            : Builder.CreateGEP(Ptr, Indices);
3948         assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
3949                "NewGEP is not a pointer vector");
3950         VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
3951         addMetadata(NewGEP, GEP);
3952       }
3953     }
3954 
3955     break;
3956   }
3957   case Instruction::UDiv:
3958   case Instruction::SDiv:
3959   case Instruction::SRem:
3960   case Instruction::URem:
3961   case Instruction::Add:
3962   case Instruction::FAdd:
3963   case Instruction::Sub:
3964   case Instruction::FSub:
3965   case Instruction::Mul:
3966   case Instruction::FMul:
3967   case Instruction::FDiv:
3968   case Instruction::FRem:
3969   case Instruction::Shl:
3970   case Instruction::LShr:
3971   case Instruction::AShr:
3972   case Instruction::And:
3973   case Instruction::Or:
3974   case Instruction::Xor: {
3975     // Just widen binops.
3976     auto *BinOp = cast<BinaryOperator>(&I);
3977     setDebugLocFromInst(Builder, BinOp);
3978 
3979     for (unsigned Part = 0; Part < UF; ++Part) {
3980       Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
3981       Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
3982       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
3983 
3984       if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
3985         VecOp->copyIRFlags(BinOp);
3986 
3987       // Use this vector value for all users of the original instruction.
3988       VectorLoopValueMap.setVectorValue(&I, Part, V);
3989       addMetadata(V, BinOp);
3990     }
3991 
3992     break;
3993   }
3994   case Instruction::Select: {
3995     // Widen selects.
3996     // If the selector is loop invariant we can create a select
3997     // instruction with a scalar condition. Otherwise, use vector-select.
3998     auto *SE = PSE.getSE();
3999     bool InvariantCond =
4000         SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4001     setDebugLocFromInst(Builder, &I);
4002 
    // The condition can be loop invariant but still defined inside the
4004     // loop. This means that we can't just use the original 'cond' value.
4005     // We have to take the 'vectorized' value and pick the first lane.
4006     // Instcombine will make this a no-op.
4007 
4008     auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4009 
4010     for (unsigned Part = 0; Part < UF; ++Part) {
4011       Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4012       Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4013       Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4014       Value *Sel =
4015           Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4016       VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4017       addMetadata(Sel, &I);
4018     }
4019 
4020     break;
4021   }
4022 
4023   case Instruction::ICmp:
4024   case Instruction::FCmp: {
    // Widen compares by generating vector compares.
4026     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4027     auto *Cmp = dyn_cast<CmpInst>(&I);
4028     setDebugLocFromInst(Builder, Cmp);
4029     for (unsigned Part = 0; Part < UF; ++Part) {
4030       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4031       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4032       Value *C = nullptr;
4033       if (FCmp) {
4034         // Propagate fast math flags.
4035         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4036         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4037         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4038       } else {
4039         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4040       }
4041       VectorLoopValueMap.setVectorValue(&I, Part, C);
4042       addMetadata(C, &I);
4043     }
4044 
4045     break;
4046   }
4047 
4048   case Instruction::ZExt:
4049   case Instruction::SExt:
4050   case Instruction::FPToUI:
4051   case Instruction::FPToSI:
4052   case Instruction::FPExt:
4053   case Instruction::PtrToInt:
4054   case Instruction::IntToPtr:
4055   case Instruction::SIToFP:
4056   case Instruction::UIToFP:
4057   case Instruction::Trunc:
4058   case Instruction::FPTrunc:
4059   case Instruction::BitCast: {
4060     auto *CI = dyn_cast<CastInst>(&I);
4061     setDebugLocFromInst(Builder, CI);
4062 
    // Vectorize casts.
4064     Type *DestTy =
4065         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4066 
4067     for (unsigned Part = 0; Part < UF; ++Part) {
4068       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4069       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4070       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4071       addMetadata(Cast, &I);
4072     }
4073     break;
4074   }
4075 
4076   case Instruction::Call: {
4077     // Ignore dbg intrinsics.
4078     if (isa<DbgInfoIntrinsic>(I))
4079       break;
4080     setDebugLocFromInst(Builder, &I);
4081 
4082     Module *M = I.getParent()->getParent()->getParent();
4083     auto *CI = cast<CallInst>(&I);
4084 
4085     StringRef FnName = CI->getCalledFunction()->getName();
4086     Function *F = CI->getCalledFunction();
4087     Type *RetTy = ToVectorTy(CI->getType(), VF);
4088     SmallVector<Type *, 4> Tys;
4089     for (Value *ArgOperand : CI->arg_operands())
4090       Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4091 
4092     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4093 
    // Decide whether the vectorized version of the instruction should use an
    // intrinsic or an ordinary library call, depending on which is cheaper.
4097     bool NeedToScalarize;
4098     unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4099     bool UseVectorIntrinsic =
4100         ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4101     assert((UseVectorIntrinsic || !NeedToScalarize) &&
4102            "Instruction should be scalarized elsewhere.");
4103 
4104     for (unsigned Part = 0; Part < UF; ++Part) {
4105       SmallVector<Value *, 4> Args;
4106       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4107         Value *Arg = CI->getArgOperand(i);
4108         // Some intrinsics have a scalar argument - don't replace it with a
4109         // vector.
4110         if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
4111           Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
4112         Args.push_back(Arg);
4113       }
4114 
4115       Function *VectorF;
4116       if (UseVectorIntrinsic) {
4117         // Use vector version of the intrinsic.
4118         Type *TysForDecl[] = {CI->getType()};
4119         if (VF > 1)
4120           TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4121         VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4122       } else {
4123         // Use vector version of the library call.
4124         StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4125         assert(!VFnName.empty() && "Vector function name is empty.");
4126         VectorF = M->getFunction(VFnName);
4127         if (!VectorF) {
4128           // Generate a declaration
4129           FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4130           VectorF =
4131               Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4132           VectorF->copyAttributesFrom(F);
4133         }
4134       }
4135       assert(VectorF && "Can't create vector function.");
4136 
4137       SmallVector<OperandBundleDef, 1> OpBundles;
4138       CI->getOperandBundlesAsDefs(OpBundles);
4139       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4140 
4141       if (isa<FPMathOperator>(V))
4142         V->copyFastMathFlags(CI);
4143 
4144       VectorLoopValueMap.setVectorValue(&I, Part, V);
4145       addMetadata(V, &I);
4146     }
4147 
4148     break;
4149   }
4150 
4151   default:
4152     // This instruction is not vectorized by simple widening.
4153     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4154     llvm_unreachable("Unhandled instruction!");
4155   } // end of switch.
4156 }
4157 
4158 void InnerLoopVectorizer::updateAnalysis() {
4159   // Forget the original basic block.
4160   PSE.getSE()->forgetLoop(OrigLoop);
4161 
4162   // DT is not kept up-to-date for outer loop vectorization
4163   if (EnableVPlanNativePath)
4164     return;
4165 
4166   // Update the dominator tree information.
4167   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4168          "Entry does not dominate exit.");
4169 
4170   DT->addNewBlock(LoopMiddleBlock,
4171                   LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4172   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4173   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4174   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4175   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
4176 }
4177 
4178 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4179   // We should not collect Scalars more than once per VF. Right now, this
4180   // function is called from collectUniformsAndScalars(), which already does
4181   // this check. Collecting Scalars for VF=1 does not make any sense.
4182   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4183          "This function should not be visited twice for the same VF");
4184 
4185   SmallSetVector<Instruction *, 8> Worklist;
4186 
4187   // These sets are used to seed the analysis with pointers used by memory
4188   // accesses that will remain scalar.
4189   SmallSetVector<Instruction *, 8> ScalarPtrs;
4190   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4191 
4192   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4193   // The pointer operands of loads and stores will be scalar as long as the
4194   // memory access is not a gather or scatter operation. The value operand of a
4195   // store will remain scalar if the store is scalarized.
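  // For example, the pointer operand of a consecutive (widened) load remains
  // scalar, whereas a gather consumes a vector of pointers, so its pointer
  // operand is not a scalar use.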
4196   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4197     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4198     assert(WideningDecision != CM_Unknown &&
4199            "Widening decision should be ready at this moment");
4200     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4201       if (Ptr == Store->getValueOperand())
4202         return WideningDecision == CM_Scalarize;
4203     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4204            "Ptr is neither a value or pointer operand");
4205     return WideningDecision != CM_GatherScatter;
4206   };
4207 
4208   // A helper that returns true if the given value is a bitcast or
4209   // getelementptr instruction contained in the loop.
4210   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4211     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4212             isa<GetElementPtrInst>(V)) &&
4213            !TheLoop->isLoopInvariant(V);
4214   };
4215 
4216   // A helper that evaluates a memory access's use of a pointer. If the use
4217   // will be a scalar use, and the pointer is only used by memory accesses, we
4218   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4219   // PossibleNonScalarPtrs.
4220   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4221     // We only care about bitcast and getelementptr instructions contained in
4222     // the loop.
4223     if (!isLoopVaryingBitCastOrGEP(Ptr))
4224       return;
4225 
4226     // If the pointer has already been identified as scalar (e.g., if it was
4227     // also identified as uniform), there's nothing to do.
4228     auto *I = cast<Instruction>(Ptr);
4229     if (Worklist.count(I))
4230       return;
4231 
4232     // If the use of the pointer will be a scalar use, and all users of the
4233     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4234     // place the pointer in PossibleNonScalarPtrs.
4235     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4236           return isa<LoadInst>(U) || isa<StoreInst>(U);
4237         }))
4238       ScalarPtrs.insert(I);
4239     else
4240       PossibleNonScalarPtrs.insert(I);
4241   };
4242 
4243   // We seed the scalars analysis with three classes of instructions: (1)
4244   // instructions marked uniform-after-vectorization, (2) bitcast and
4245   // getelementptr instructions used by memory accesses requiring a scalar use,
4246   // and (3) pointer induction variables and their update instructions (we
4247   // currently only scalarize these).
4248   //
4249   // (1) Add to the worklist all instructions that have been identified as
4250   // uniform-after-vectorization.
4251   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4252 
4253   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4254   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4256   // scatter operation. The value operand of a store will remain scalar if the
4257   // store is scalarized.
4258   for (auto *BB : TheLoop->blocks())
4259     for (auto &I : *BB) {
4260       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4261         evaluatePtrUse(Load, Load->getPointerOperand());
4262       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4263         evaluatePtrUse(Store, Store->getPointerOperand());
4264         evaluatePtrUse(Store, Store->getValueOperand());
4265       }
4266     }
4267   for (auto *I : ScalarPtrs)
4268     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4269       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4270       Worklist.insert(I);
4271     }
4272 
4273   // (3) Add to the worklist all pointer induction variables and their update
4274   // instructions.
4275   //
4276   // TODO: Once we are able to vectorize pointer induction variables we should
4277   //       no longer insert them into the worklist here.
4278   auto *Latch = TheLoop->getLoopLatch();
4279   for (auto &Induction : *Legal->getInductionVars()) {
4280     auto *Ind = Induction.first;
4281     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4282     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4283       continue;
4284     Worklist.insert(Ind);
4285     Worklist.insert(IndUpdate);
4286     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4287     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4288                       << "\n");
4289   }
4290 
4291   // Insert the forced scalars.
4292   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4293   // induction variable when the PHI user is scalarized.
4294   auto ForcedScalar = ForcedScalars.find(VF);
4295   if (ForcedScalar != ForcedScalars.end())
4296     for (auto *I : ForcedScalar->second)
4297       Worklist.insert(I);
4298 
4299   // Expand the worklist by looking through any bitcasts and getelementptr
4300   // instructions we've already identified as scalar. This is similar to the
4301   // expansion step in collectLoopUniforms(); however, here we're only
4302   // expanding to include additional bitcasts and getelementptr instructions.
4303   unsigned Idx = 0;
4304   while (Idx != Worklist.size()) {
4305     Instruction *Dst = Worklist[Idx++];
4306     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4307       continue;
4308     auto *Src = cast<Instruction>(Dst->getOperand(0));
4309     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4310           auto *J = cast<Instruction>(U);
4311           return !TheLoop->contains(J) || Worklist.count(J) ||
4312                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4313                   isScalarUse(J, Src));
4314         })) {
4315       Worklist.insert(Src);
4316       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4317     }
4318   }
4319 
4320   // An induction variable will remain scalar if all users of the induction
4321   // variable and induction variable update remain scalar.
4322   for (auto &Induction : *Legal->getInductionVars()) {
4323     auto *Ind = Induction.first;
4324     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4325 
4326     // We already considered pointer induction variables, so there's no reason
4327     // to look at their users again.
4328     //
4329     // TODO: Once we are able to vectorize pointer induction variables we
4330     //       should no longer skip over them here.
4331     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4332       continue;
4333 
4334     // Determine if all users of the induction variable are scalar after
4335     // vectorization.
4336     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4337       auto *I = cast<Instruction>(U);
4338       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4339     });
4340     if (!ScalarInd)
4341       continue;
4342 
4343     // Determine if all users of the induction variable update instruction are
4344     // scalar after vectorization.
4345     auto ScalarIndUpdate =
4346         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4347           auto *I = cast<Instruction>(U);
4348           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4349         });
4350     if (!ScalarIndUpdate)
4351       continue;
4352 
4353     // The induction variable and its update instruction will remain scalar.
4354     Worklist.insert(Ind);
4355     Worklist.insert(IndUpdate);
4356     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4357     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4358                       << "\n");
4359   }
4360 
4361   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4362 }
4363 
bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
                                                         unsigned VF) {
4365   if (!blockNeedsPredication(I->getParent()))
4366     return false;
  switch (I->getOpcode()) {
4368   default:
4369     break;
4370   case Instruction::Load:
4371   case Instruction::Store: {
4372     if (!Legal->isMaskRequired(I))
4373       return false;
4374     auto *Ptr = getLoadStorePointerOperand(I);
4375     auto *Ty = getMemInstValueType(I);
4376     // We have already decided how to vectorize this instruction, get that
4377     // result.
4378     if (VF > 1) {
4379       InstWidening WideningDecision = getWideningDecision(I, VF);
4380       assert(WideningDecision != CM_Unknown &&
4381              "Widening decision should be ready at this moment");
4382       return WideningDecision == CM_Scalarize;
4383     }
4384     return isa<LoadInst>(I) ?
4385         !(isLegalMaskedLoad(Ty, Ptr)  || isLegalMaskedGather(Ty))
4386       : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
4387   }
4388   case Instruction::UDiv:
4389   case Instruction::SDiv:
4390   case Instruction::SRem:
4391   case Instruction::URem:
4392     return mayDivideByZero(*I);
4393   }
4394   return false;
4395 }
4396 
4397 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4398                                                                unsigned VF) {
4399   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4400   assert(getWideningDecision(I, VF) == CM_Unknown &&
4401          "Decision should not be set yet.");
4402   auto *Group = getInterleavedAccessGroup(I);
4403   assert(Group && "Must have a group.");
4404 
4405   // Check if masking is required.
4406   // A Group may need masking for one of two reasons: it resides in a block that
4407   // needs predication, or it was decided to use masking to deal with gaps.
4408   bool PredicatedAccessRequiresMasking =
4409       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4410   bool AccessWithGapsRequiresMasking =
4411       Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed;
4412   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4413     return true;
4414 
4415   // If masked interleaving is required, we expect that the user/target had
4416   // enabled it, because otherwise it either wouldn't have been created or
4417   // it should have been invalidated by the CostModel.
4418   assert(useMaskedInterleavedAccesses(TTI) &&
4419          "Masked interleave-groups for predicated accesses are not enabled.");
4420 
4421   auto *Ty = getMemInstValueType(I);
4422   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty)
4423                           : TTI.isLegalMaskedStore(Ty);
4424 }
4425 
4426 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4427                                                                unsigned VF) {
4428   // Get and ensure we have a valid memory instruction.
4429   LoadInst *LI = dyn_cast<LoadInst>(I);
4430   StoreInst *SI = dyn_cast<StoreInst>(I);
4431   assert((LI || SI) && "Invalid memory instruction");
4432 
4433   auto *Ptr = getLoadStorePointerOperand(I);
4434 
4435   // In order to be widened, the pointer should be consecutive, first of all.
4436   if (!Legal->isConsecutivePtr(Ptr))
4437     return false;
4438 
4439   // If the instruction is a store located in a predicated block, it will be
4440   // scalarized.
4441   if (isScalarWithPredication(I))
4442     return false;
4443 
  // If the instruction's allocated size doesn't equal its type size, it
4445   // requires padding and will be scalarized.
4446   auto &DL = I->getModule()->getDataLayout();
4447   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4448   if (hasIrregularType(ScalarTy, DL, VF))
4449     return false;
4450 
4451   return true;
4452 }
4453 
4454 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4455   // We should not collect Uniforms more than once per VF. Right now,
4456   // this function is called from collectUniformsAndScalars(), which
4457   // already does this check. Collecting Uniforms for VF=1 does not make any
4458   // sense.
4459 
4460   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4461          "This function should not be visited twice for the same VF");
4462 
  // Initialize the entry for this VF so that, even if no uniform value is
  // found, we won't analyze it again: Uniforms.count(VF) will return 1.
4465   Uniforms[VF].clear();
4466 
4467   // We now know that the loop is vectorizable!
4468   // Collect instructions inside the loop that will remain uniform after
4469   // vectorization.
4470 
4471   // Global values, params and instructions outside of current loop are out of
4472   // scope.
4473   auto isOutOfScope = [&](Value *V) -> bool {
4474     Instruction *I = dyn_cast<Instruction>(V);
4475     return (!I || !TheLoop->contains(I));
4476   };
4477 
4478   SetVector<Instruction *> Worklist;
4479   BasicBlock *Latch = TheLoop->getLoopLatch();
4480 
4481   // Start with the conditional branch. If the branch condition is an
4482   // instruction contained in the loop that is only used by the branch, it is
4483   // uniform.
4484   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4485   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
4486     Worklist.insert(Cmp);
4487     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
4488   }
4489 
4490   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4491   // are pointers that are treated like consecutive pointers during
4492   // vectorization. The pointer operands of interleaved accesses are an
4493   // example.
4494   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4495 
4496   // Holds pointer operands of instructions that are possibly non-uniform.
4497   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4498 
4499   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4500     InstWidening WideningDecision = getWideningDecision(I, VF);
4501     assert(WideningDecision != CM_Unknown &&
4502            "Widening decision should be ready at this moment");
4503 
4504     return (WideningDecision == CM_Widen ||
4505             WideningDecision == CM_Widen_Reverse ||
4506             WideningDecision == CM_Interleave);
4507   };
4508   // Iterate over the instructions in the loop, and collect all
4509   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4510   // that a consecutive-like pointer operand will be scalarized, we collect it
4511   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4512   // getelementptr instruction can be used by both vectorized and scalarized
4513   // memory instructions. For example, if a loop loads and stores from the same
4514   // location, but the store is conditional, the store will be scalarized, and
4515   // the getelementptr won't remain uniform.
4516   for (auto *BB : TheLoop->blocks())
4517     for (auto &I : *BB) {
4518       // If there's no pointer operand, there's nothing to do.
4519       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4520       if (!Ptr)
4521         continue;
4522 
4523       // True if all users of Ptr are memory accesses that have Ptr as their
4524       // pointer operand.
4525       auto UsersAreMemAccesses =
4526           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4527             return getLoadStorePointerOperand(U) == Ptr;
4528           });
4529 
4530       // Ensure the memory instruction will not be scalarized or used by
4531       // gather/scatter, making its pointer operand non-uniform. If the pointer
4532       // operand is used by any instruction other than a memory access, we
4533       // conservatively assume the pointer operand may be non-uniform.
4534       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4535         PossibleNonUniformPtrs.insert(Ptr);
4536 
4537       // If the memory instruction will be vectorized and its pointer operand
4538       // is consecutive-like, or interleaving - the pointer operand should
4539       // remain uniform.
4540       else
4541         ConsecutiveLikePtrs.insert(Ptr);
4542     }
4543 
4544   // Add to the Worklist all consecutive and consecutive-like pointers that
4545   // aren't also identified as possibly non-uniform.
4546   for (auto *V : ConsecutiveLikePtrs)
4547     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) {
4548       LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
4549       Worklist.insert(V);
4550     }
4551 
  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside Worklist. This ensures that a
  // uniform instruction will only be used by uniform instructions.
4555   unsigned idx = 0;
4556   while (idx != Worklist.size()) {
4557     Instruction *I = Worklist[idx++];
4558 
4559     for (auto OV : I->operand_values()) {
4560       // isOutOfScope operands cannot be uniform instructions.
4561       if (isOutOfScope(OV))
4562         continue;
4563       // First order recurrence Phi's should typically be considered
4564       // non-uniform.
4565       auto *OP = dyn_cast<PHINode>(OV);
4566       if (OP && Legal->isFirstOrderRecurrence(OP))
4567         continue;
4568       // If all the users of the operand are uniform, then add the
4569       // operand into the uniform worklist.
4570       auto *OI = cast<Instruction>(OV);
4571       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4572             auto *J = cast<Instruction>(U);
4573             return Worklist.count(J) ||
4574                    (OI == getLoadStorePointerOperand(J) &&
4575                     isUniformDecision(J, VF));
4576           })) {
4577         Worklist.insert(OI);
4578         LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
4579       }
4580     }
4581   }
4582 
4583   // Returns true if Ptr is the pointer operand of a memory access instruction
4584   // I, and I is known to not require scalarization.
4585   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4586     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4587   };
4588 
4589   // For an instruction to be added into Worklist above, all its users inside
4590   // the loop should also be in Worklist. However, this condition cannot be
4591   // true for phi nodes that form a cyclic dependence. We must process phi
4592   // nodes separately. An induction variable will remain uniform if all users
4593   // of the induction variable and induction variable update remain uniform.
4594   // The code below handles both pointer and non-pointer induction variables.
4595   for (auto &Induction : *Legal->getInductionVars()) {
4596     auto *Ind = Induction.first;
4597     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4598 
4599     // Determine if all users of the induction variable are uniform after
4600     // vectorization.
4601     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4602       auto *I = cast<Instruction>(U);
4603       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4604              isVectorizedMemAccessUse(I, Ind);
4605     });
4606     if (!UniformInd)
4607       continue;
4608 
4609     // Determine if all users of the induction variable update instruction are
4610     // uniform after vectorization.
4611     auto UniformIndUpdate =
4612         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4613           auto *I = cast<Instruction>(U);
4614           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4615                  isVectorizedMemAccessUse(I, IndUpdate);
4616         });
4617     if (!UniformIndUpdate)
4618       continue;
4619 
4620     // The induction variable and its update instruction will remain uniform.
4621     Worklist.insert(Ind);
4622     Worklist.insert(IndUpdate);
4623     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
4624     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate
4625                       << "\n");
4626   }
4627 
4628   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4629 }
4630 
4631 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
4632   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
4635     LLVM_DEBUG(
4636         dbgs() << "LV: Not inserting runtime ptr check for divergent target");
4637 
4638     ORE->emit(
4639       createMissedAnalysis("CantVersionLoopWithDivergentTarget")
4640       << "runtime pointer checks needed. Not enabled for divergent target");
4641 
4642     return None;
4643   }
4644 
4645   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  // The remaining checks deal with the scalar loop when optimizing for size.
  if (!OptForSize)
4647     return computeFeasibleMaxVF(OptForSize, TC);
4648 
4649   if (Legal->getRuntimePointerChecking()->Need) {
4650     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
4651               << "runtime pointer checks needed. Enable vectorization of this "
4652                  "loop with '#pragma clang loop vectorize(enable)' when "
4653                  "compiling with -Os/-Oz");
4654     LLVM_DEBUG(
4655         dbgs()
4656         << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
4657     return None;
4658   }
4659 
4660   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4661     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
4662               << "runtime SCEV checks needed. Enable vectorization of this "
4663                  "loop with '#pragma clang loop vectorize(enable)' when "
4664                  "compiling with -Os/-Oz");
4665     LLVM_DEBUG(
4666         dbgs()
4667         << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n");
4668     return None;
4669   }
4670 
4671   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4672   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4673     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
4674               << "runtime stride == 1 checks needed. Enable vectorization of "
4675                  "this loop with '#pragma clang loop vectorize(enable)' when "
4676                  "compiling with -Os/-Oz");
4677     LLVM_DEBUG(
4678         dbgs()
4679         << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n");
4680     return None;
4681   }
4682 
4683   // If we optimize the program for size, avoid creating the tail loop.
4684   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4685 
4686   if (TC == 1) {
4687     ORE->emit(createMissedAnalysis("SingleIterationLoop")
4688               << "loop trip count is one, irrelevant for vectorization");
4689     LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n");
4690     return None;
4691   }
4692 
4693   // Record that scalar epilogue is not allowed.
4694   LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4695 
4696   IsScalarEpilogueAllowed = !OptForSize;
4697 
4698   // We don't create an epilogue when optimizing for size.
4699   // Invalidate interleave groups that require an epilogue if we can't mask
4700   // the interleave-group.
4701   if (!useMaskedInterleavedAccesses(TTI))
4702     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
4703 
4704   unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);
4705 
4706   if (TC > 0 && TC % MaxVF == 0) {
4707     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
4708     return MaxVF;
4709   }
4710 
4711   // If we don't know the precise trip count, or if the trip count that we
4712   // found modulo the vectorization factor is not zero, try to fold the tail
4713   // by masking.
4714   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
4715   if (Legal->canFoldTailByMasking()) {
4716     FoldTailByMasking = true;
4717     return MaxVF;
4718   }
4719 
4720   if (TC == 0) {
4721     ORE->emit(
4722         createMissedAnalysis("UnknownLoopCountComplexCFG")
4723         << "unable to calculate the loop count due to complex control flow");
4724     return None;
4725   }
4726 
4727   ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
4728             << "cannot optimize for size and vectorize at the same time. "
4729                "Enable vectorization of this loop with '#pragma clang loop "
4730                "vectorize(enable)' when compiling with -Os/-Oz");
4731   return None;
4732 }
4733 
4734 unsigned
4735 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
4736                                                  unsigned ConstTripCount) {
4737   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4738   unsigned SmallestType, WidestType;
4739   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4740   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
4741 
4742   // Get the maximum safe dependence distance in bits computed by LAA.
4743   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
4745   // dependence distance).
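  // For example (illustrative), a 16-byte safe dependence distance on i32
  // accesses allows MaxVF = 4, giving MaxSafeRegisterWidth = 4 * 4 * 8 = 128
  // bits.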
4746   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
4747 
4748   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
4749 
4750   unsigned MaxVectorSize = WidestRegister / WidestType;
4751 
4752   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4753                     << " / " << WidestType << " bits.\n");
4754   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
4755                     << WidestRegister << " bits.\n");
4756 
4757   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
4758                                  " into one vector!");
4759   if (MaxVectorSize == 0) {
4760     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
4761     MaxVectorSize = 1;
4762     return MaxVectorSize;
4763   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
4764              isPowerOf2_32(ConstTripCount)) {
4765     // We need to clamp the VF to be the ConstTripCount. There is no point in
4766     // choosing a higher viable VF as done in the loop below.
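    // For example, a loop with a constant trip count of 8 and a MaxVectorSize
    // of 16 is clamped to VF = 8, so no vector lanes are wasted.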
4767     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
4768                       << ConstTripCount << "\n");
4769     MaxVectorSize = ConstTripCount;
4770     return MaxVectorSize;
4771   }
4772 
4773   unsigned MaxVF = MaxVectorSize;
4774   if (TTI.shouldMaximizeVectorBandwidth(OptForSize) ||
4775       (MaximizeBandwidth && !OptForSize)) {
4776     // Collect all viable vectorization factors larger than the default MaxVF
4777     // (i.e. MaxVectorSize).
4778     SmallVector<unsigned, 8> VFs;
4779     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
4780     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
4781       VFs.push_back(VS);
4782 
4783     // For each VF calculate its register usage.
4784     auto RUs = calculateRegisterUsage(VFs);
4785 
4786     // Select the largest VF which doesn't require more registers than existing
4787     // ones.
4788     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
4789     for (int i = RUs.size() - 1; i >= 0; --i) {
4790       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
4791         MaxVF = VFs[i];
4792         break;
4793       }
4794     }
4795     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
4796       if (MaxVF < MinVF) {
4797         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
4798                           << ") with target's minimum: " << MinVF << '\n');
4799         MaxVF = MinVF;
4800       }
4801     }
4802   }
4803   return MaxVF;
4804 }
4805 
4806 VectorizationFactor
4807 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
4808   float Cost = expectedCost(1).first;
4809   const float ScalarCost = Cost;
4810   unsigned Width = 1;
4811   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
4812 
4813   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
4814   if (ForceVectorization && MaxVF > 1) {
4815     // Ignore scalar width, because the user explicitly wants vectorization.
4816     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4817     // evaluation.
4818     Cost = std::numeric_limits<float>::max();
4819   }
4820 
4821   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
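    // For example, if the scalar loop costs 10 and the VF = 4 body costs 24,
    // the per-lane vector cost is 24 / 4 = 6, which beats the scalar cost.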
4825     VectorizationCostTy C = expectedCost(i);
4826     float VectorCost = C.first / (float)i;
4827     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
4828                       << " costs: " << (int)VectorCost << ".\n");
4829     if (!C.second && !ForceVectorization) {
4830       LLVM_DEBUG(
4831           dbgs() << "LV: Not considering vector loop of width " << i
4832                  << " because it will not generate any vector instructions.\n");
4833       continue;
4834     }
4835     if (VectorCost < Cost) {
4836       Cost = VectorCost;
4837       Width = i;
4838     }
4839   }
4840 
4841   if (!EnableCondStoresVectorization && NumPredStores) {
4842     ORE->emit(createMissedAnalysis("ConditionalStore")
4843               << "store that is conditionally executed prevents vectorization");
4844     LLVM_DEBUG(
4845         dbgs() << "LV: No vectorization. There are conditional stores.\n");
4846     Width = 1;
4847     Cost = ScalarCost;
4848   }
4849 
4850   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
4851              << "LV: Vectorization seems to be not beneficial, "
4852              << "but was forced by a user.\n");
4853   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
4854   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
4855   return Factor;
4856 }
4857 
4858 std::pair<unsigned, unsigned>
4859 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
4860   unsigned MinWidth = -1U;
4861   unsigned MaxWidth = 8;
4862   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
4863 
4864   // For each block.
4865   for (BasicBlock *BB : TheLoop->blocks()) {
4866     // For each instruction in the loop.
4867     for (Instruction &I : BB->instructionsWithoutDebug()) {
4868       Type *T = I.getType();
4869 
4870       // Skip ignored values.
4871       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
4872         continue;
4873 
4874       // Only examine Loads, Stores and PHINodes.
4875       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4876         continue;
4877 
4878       // Examine PHI nodes that are reduction variables. Update the type to
4879       // account for the recurrence type.
4880       if (auto *PN = dyn_cast<PHINode>(&I)) {
4881         if (!Legal->isReductionVariable(PN))
4882           continue;
4883         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
4884         T = RdxDesc.getRecurrenceType();
4885       }
4886 
4887       // Examine the stored values.
4888       if (auto *ST = dyn_cast<StoreInst>(&I))
4889         T = ST->getValueOperand()->getType();
4890 
4891       // Ignore loaded pointer types and stored pointer types that are not
4892       // vectorizable.
4893       //
4894       // FIXME: The check here attempts to predict whether a load or store will
4895       //        be vectorized. We only know this for certain after a VF has
4896       //        been selected. Here, we assume that if an access can be
4897       //        vectorized, it will be. We should also look at extending this
4898       //        optimization to non-pointer types.
4899       //
4900       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
4901           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
4902         continue;
4903 
4904       MinWidth = std::min(MinWidth,
4905                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
4906       MaxWidth = std::max(MaxWidth,
4907                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
4908     }
4909   }
4910 
4911   return {MinWidth, MaxWidth};
4912 }
4913 
4914 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
4915                                                            unsigned VF,
4916                                                            unsigned LoopCost) {
4917   // -- The interleave heuristics --
4918   // We interleave the loop in order to expose ILP and reduce the loop overhead.
4919   // There are many micro-architectural considerations that we can't predict
4920   // at this level. For example, frontend pressure (on decode or fetch) due to
4921   // code size, or the number and capabilities of the execution ports.
4922   //
4923   // We use the following heuristics to select the interleave count:
4924   // 1. If the code has reductions, then we interleave to break the cross
4925   // iteration dependency.
4926   // 2. If the loop is really small, then we interleave to reduce the loop
4927   // overhead.
4928   // 3. We don't interleave if we think that we will spill registers to memory
4929   // due to the increased register pressure.
4930 
4931   // When we optimize for size, we don't interleave.
4932   if (OptForSize)
4933     return 1;
4934 
  // The maximum safe dependence distance was already used to limit the
  // vectorization width, so do not interleave.
4936   if (Legal->getMaxSafeDepDistBytes() != -1U)
4937     return 1;
4938 
4939   // Do not interleave loops with a relatively small trip count.
4940   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4941   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
4942     return 1;
4943 
4944   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
4945   LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4946                     << " registers\n");
4947 
4948   if (VF == 1) {
4949     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4950       TargetNumRegisters = ForceTargetNumScalarRegs;
4951   } else {
4952     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4953       TargetNumRegisters = ForceTargetNumVectorRegs;
4954   }
4955 
4956   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by this value below, so assume that there is at least one
  // instruction that uses at least one register.
4959   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
4960 
4961   // We calculate the interleave count using the following formula.
4962   // Subtract the number of loop invariants from the number of available
4963   // registers. These registers are used by all of the interleaved instances.
4964   // Next, divide the remaining registers by the number of registers that is
4965   // required by the loop, in order to estimate how many parallel instances
4966   // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want a power-of-two interleave count to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently only happens when optimizing for size, in which
  // case we already returned an interleave count of 1 above.
4972   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
4973                               R.MaxLocalUsers);
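  // For example, with 16 target registers, 2 loop-invariant values and
  // MaxLocalUsers == 3, this yields PowerOf2Floor((16 - 2) / 3) =
  // PowerOf2Floor(4) = 4.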
4974 
4975   // Don't count the induction variable as interleaved.
4976   if (EnableIndVarRegisterHeur)
4977     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
4978                        std::max(1U, (R.MaxLocalUsers - 1)));
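  // Continuing the example above, subtracting the induction variable gives
  // PowerOf2Floor((16 - 2 - 1) / max(1, 3 - 1)) = PowerOf2Floor(6) = 4.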
4979 
4980   // Clamp the interleave ranges to reasonable counts.
4981   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4982 
4983   // Check if the user has overridden the max.
4984   if (VF == 1) {
4985     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4986       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4987   } else {
4988     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4989       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4990   }
4991 
4992   // If we did not calculate the cost for VF (because the user selected the VF)
4993   // then we calculate the cost of VF here.
4994   if (LoopCost == 0)
4995     LoopCost = expectedCost(VF).first;
4996 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target allows.
4999   if (IC > MaxInterleaveCount)
5000     IC = MaxInterleaveCount;
5001   else if (IC < 1)
5002     IC = 1;
5003 
5004   // Interleave if we vectorized this loop and there is a reduction that could
5005   // benefit from interleaving.
5006   if (VF > 1 && !Legal->getReductionVars()->empty()) {
5007     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5008     return IC;
5009   }
5010 
5011   // Note that if we've already vectorized the loop we will have done the
5012   // runtime check and so interleaving won't require further checks.
5013   bool InterleavingRequiresRuntimePointerCheck =
5014       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5015 
5016   // We want to interleave small loops in order to reduce the loop overhead and
5017   // potentially expose ILP opportunities.
5018   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5019   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the per-iteration loop overhead costs 1, and we use the
    // cost model's estimate of the loop cost to interleave until that overhead
    // is roughly 5% of the cost of the loop.
5023     unsigned SmallIC =
5024         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
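    // For example, if SmallLoopCost is 20 and LoopCost is 5, SmallIC is at most
    // PowerOf2Floor(20 / 5) = 4.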
5025 
5026     // Interleave until store/load ports (estimated by max interleave count) are
5027     // saturated.
5028     unsigned NumStores = Legal->getNumStores();
5029     unsigned NumLoads = Legal->getNumLoads();
5030     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5031     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
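    // For example, with IC == 8, two stores and four loads, StoresIC == 4 and
    // LoadsIC == 2.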
5032 
5033     // If we have a scalar reduction (vector reductions are already dealt with
5034     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit the interleave count, by
    // default, to 2 so that the critical path only gets increased by one
    // reduction operation.
5037     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
5038       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5039       SmallIC = std::min(SmallIC, F);
5040       StoresIC = std::min(StoresIC, F);
5041       LoadsIC = std::min(LoadsIC, F);
5042     }
5043 
5044     if (EnableLoadStoreRuntimeInterleave &&
5045         std::max(StoresIC, LoadsIC) > SmallIC) {
5046       LLVM_DEBUG(
5047           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5048       return std::max(StoresIC, LoadsIC);
5049     }
5050 
5051     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5052     return SmallIC;
5053   }
5054 
5055   // Interleave if this is a large loop (small loops are already dealt with by
5056   // this point) that could benefit from interleaving.
5057   bool HasReductions = !Legal->getReductionVars()->empty();
5058   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5059     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5060     return IC;
5061   }
5062 
5063   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5064   return 1;
5065 }
5066 
5067 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5068 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5069   // This function calculates the register usage by measuring the highest number
5070   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are
5073   // met before their users. We assume that each instruction that has in-loop
5074   // users starts an interval. We record every time that an in-loop value is
5075   // used, so we have a list of the first and last occurrences of each
5076   // instruction. Next, we transpose this data structure into a multi map that
5077   // holds the list of intervals that *end* at a specific location. This multi
5078   // map allows us to perform a linear search. We scan the instructions linearly
5079   // and record each time that a new interval starts, by placing it in a set.
5080   // If we find this value in the multi-map then we remove it from the set.
5081   // The max register usage is the maximum size of the set.
5082   // We also search for instructions that are defined outside the loop, but are
5083   // used inside the loop. We need this number separately from the max-interval
5084   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
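  // For example, an instruction defined at index 3 whose last in-loop use is
  // recorded at index 7 stays in the set of open intervals from its definition
  // until that last use has been processed, contributing to the usage count at
  // each point in between.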
5086   LoopBlocksDFS DFS(TheLoop);
5087   DFS.perform(LI);
5088 
5089   RegisterUsage RU;
5090 
5091   // Each 'key' in the map opens a new interval. The values
5092   // of the map are the index of the 'last seen' usage of the
5093   // instruction that is the key.
5094   using IntervalMap = DenseMap<Instruction *, unsigned>;
5095 
5096   // Maps instruction to its index.
5097   SmallVector<Instruction *, 64> IdxToInstr;
5098   // Marks the end of each interval.
5099   IntervalMap EndPoint;
  // Saves the set of instructions that have at least one use inside the loop.
5101   SmallPtrSet<Instruction *, 8> Ends;
  // Saves the instructions that are used in the loop but are defined outside
  // it; non-instruction values such as arguments and constants are ignored.
5104   SmallPtrSet<Value *, 8> LoopInvariants;
5105 
5106   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5107     for (Instruction &I : BB->instructionsWithoutDebug()) {
5108       IdxToInstr.push_back(&I);
5109 
5110       // Save the end location of each USE.
5111       for (Value *U : I.operands()) {
5112         auto *Instr = dyn_cast<Instruction>(U);
5113 
5114         // Ignore non-instruction values such as arguments, constants, etc.
5115         if (!Instr)
5116           continue;
5117 
5118         // If this instruction is outside the loop then record it and continue.
5119         if (!TheLoop->contains(Instr)) {
5120           LoopInvariants.insert(Instr);
5121           continue;
5122         }
5123 
5124         // Overwrite previous end points.
5125         EndPoint[Instr] = IdxToInstr.size();
5126         Ends.insert(Instr);
5127       }
5128     }
5129   }
5130 
5131   // Saves the list of intervals that end with the index in 'key'.
5132   using InstrList = SmallVector<Instruction *, 2>;
5133   DenseMap<unsigned, InstrList> TransposeEnds;
5134 
5135   // Transpose the EndPoints to a list of values that end at each index.
5136   for (auto &Interval : EndPoint)
5137     TransposeEnds[Interval.second].push_back(Interval.first);
5138 
5139   SmallPtrSet<Instruction *, 8> OpenIntervals;
5140 
5141   // Get the size of the widest register.
5142   unsigned MaxSafeDepDist = -1U;
5143   if (Legal->getMaxSafeDepDistBytes() != -1U)
5144     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5145   unsigned WidestRegister =
5146       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5147   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5148 
5149   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5150   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
5151 
5152   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5153 
5154   // A lambda that gets the register usage for the given type and VF.
5155   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5156     if (Ty->isTokenTy())
5157       return 0U;
5158     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5159     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5160   };
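  // For example, with a 128-bit widest register, an i32 value at VF == 8
  // occupies max(1, 8 * 32 / 128) = 2 registers.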
5161 
5162   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5163     Instruction *I = IdxToInstr[i];
5164 
5165     // Remove all of the instructions that end at this location.
5166     InstrList &List = TransposeEnds[i];
5167     for (Instruction *ToRemove : List)
5168       OpenIntervals.erase(ToRemove);
5169 
5170     // Ignore instructions that are never used within the loop.
5171     if (Ends.find(I) == Ends.end())
5172       continue;
5173 
5174     // Skip ignored values.
5175     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5176       continue;
5177 
5178     // For each VF find the maximum usage of registers.
5179     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5180       if (VFs[j] == 1) {
5181         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
5182         continue;
5183       }
5184       collectUniformsAndScalars(VFs[j]);
5185       // Count the number of live intervals.
5186       unsigned RegUsage = 0;
5187       for (auto Inst : OpenIntervals) {
5188         // Skip ignored values for VF > 1.
5189         if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() ||
5190             isScalarAfterVectorization(Inst, VFs[j]))
5191           continue;
5192         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
5193       }
5194       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
5195     }
5196 
5197     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5198                       << OpenIntervals.size() << '\n');
5199 
5200     // Add the current instruction to the list of open intervals.
5201     OpenIntervals.insert(I);
5202   }
5203 
5204   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5205     unsigned Invariant = 0;
5206     if (VFs[i] == 1)
5207       Invariant = LoopInvariants.size();
5208     else {
5209       for (auto Inst : LoopInvariants)
5210         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
5211     }
5212 
5213     LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
5214     LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
5215     LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant
5216                       << '\n');
5217 
5218     RU.LoopInvariantRegs = Invariant;
5219     RU.MaxLocalUsers = MaxUsages[i];
5220     RUs[i] = RU;
5221   }
5222 
5223   return RUs;
5224 }
5225 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5227   // TODO: Cost model for emulated masked load/store is completely
5228   // broken. This hack guides the cost model to use an artificially
5229   // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost model.
  // Masked Load/Gather emulation was previously never allowed, while a limited
  // number of emulated Masked Store/Scatter operations was allowed.
5235   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5236   return isa<LoadInst>(I) ||
5237          (isa<StoreInst>(I) &&
5238           NumPredStores > NumberOfStoresToPredicate);
5239 }
5240 
5241 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5242   // If we aren't vectorizing the loop, or if we've already collected the
5243   // instructions to scalarize, there's nothing to do. Collection may already
5244   // have occurred if we have a user-selected VF and are now computing the
5245   // expected cost for interleaving.
5246   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5247     return;
5248 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5250   // not profitable to scalarize any instructions, the presence of VF in the
5251   // map will indicate that we've analyzed it already.
5252   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5253 
5254   // Find all the instructions that are scalar with predication in the loop and
5255   // determine if it would be better to not if-convert the blocks they are in.
5256   // If so, we also record the instructions to scalarize.
5257   for (BasicBlock *BB : TheLoop->blocks()) {
5258     if (!blockNeedsPredication(BB))
5259       continue;
5260     for (Instruction &I : *BB)
5261       if (isScalarWithPredication(&I)) {
5262         ScalarCostsTy ScalarCosts;
        // Do not apply the discount logic if the hacked cost is needed
        // for emulated masked memrefs.
5265         if (!useEmulatedMaskMemRefHack(&I) &&
5266             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5267           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5268         // Remember that BB will remain after vectorization.
5269         PredicatedBBsAfterVectorization.insert(BB);
5270       }
5271   }
5272 }
5273 
5274 int LoopVectorizationCostModel::computePredInstDiscount(
5275     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5276     unsigned VF) {
5277   assert(!isUniformAfterVectorization(PredInst, VF) &&
5278          "Instruction marked uniform-after-vectorization will be predicated");
5279 
5280   // Initialize the discount to zero, meaning that the scalar version and the
5281   // vector version cost the same.
5282   int Discount = 0;
5283 
5284   // Holds instructions to analyze. The instructions we visit are mapped in
5285   // ScalarCosts. Those instructions are the ones that would be scalarized if
5286   // we find that the scalar version costs less.
5287   SmallVector<Instruction *, 8> Worklist;
5288 
5289   // Returns true if the given instruction can be scalarized.
5290   auto canBeScalarized = [&](Instruction *I) -> bool {
5291     // We only attempt to scalarize instructions forming a single-use chain
5292     // from the original predicated block that would otherwise be vectorized.
5293     // Although not strictly necessary, we give up on instructions we know will
5294     // already be scalar to avoid traversing chains that are unlikely to be
5295     // beneficial.
5296     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5297         isScalarAfterVectorization(I, VF))
5298       return false;
5299 
5300     // If the instruction is scalar with predication, it will be analyzed
5301     // separately. We ignore it within the context of PredInst.
5302     if (isScalarWithPredication(I))
5303       return false;
5304 
5305     // If any of the instruction's operands are uniform after vectorization,
5306     // the instruction cannot be scalarized. This prevents, for example, a
5307     // masked load from being scalarized.
5308     //
5309     // We assume we will only emit a value for lane zero of an instruction
5310     // marked uniform after vectorization, rather than VF identical values.
5311     // Thus, if we scalarize an instruction that uses a uniform, we would
5312     // create uses of values corresponding to the lanes we aren't emitting code
5313     // for. This behavior can be changed by allowing getScalarValue to clone
5314     // the lane zero values for uniforms rather than asserting.
5315     for (Use &U : I->operands())
5316       if (auto *J = dyn_cast<Instruction>(U.get()))
5317         if (isUniformAfterVectorization(J, VF))
5318           return false;
5319 
5320     // Otherwise, we can scalarize the instruction.
5321     return true;
5322   };
5323 
5324   // Returns true if an operand that cannot be scalarized must be extracted
5325   // from a vector. We will account for this scalarization overhead below. Note
5326   // that the non-void predicated instructions are placed in their own blocks,
5327   // and their return values are inserted into vectors. Thus, an extract would
5328   // still be required.
5329   auto needsExtract = [&](Instruction *I) -> bool {
5330     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
5331   };
5332 
5333   // Compute the expected cost discount from scalarizing the entire expression
5334   // feeding the predicated instruction. We currently only consider expressions
5335   // that are single-use instruction chains.
5336   Worklist.push_back(PredInst);
5337   while (!Worklist.empty()) {
5338     Instruction *I = Worklist.pop_back_val();
5339 
5340     // If we've already analyzed the instruction, there's nothing to do.
5341     if (ScalarCosts.find(I) != ScalarCosts.end())
5342       continue;
5343 
5344     // Compute the cost of the vector instruction. Note that this cost already
5345     // includes the scalarization overhead of the predicated instruction.
5346     unsigned VectorCost = getInstructionCost(I, VF).first;
5347 
5348     // Compute the cost of the scalarized instruction. This cost is the cost of
5349     // the instruction as if it wasn't if-converted and instead remained in the
5350     // predicated block. We will scale this cost by block probability after
5351     // computing the scalarization overhead.
5352     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5353 
5354     // Compute the scalarization overhead of needed insertelement instructions
5355     // and phi nodes.
5356     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5357       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5358                                                  true, false);
5359       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5360     }
5361 
5362     // Compute the scalarization overhead of needed extractelement
5363     // instructions. For each of the instruction's operands, if the operand can
5364     // be scalarized, add it to the worklist; otherwise, account for the
5365     // overhead.
5366     for (Use &U : I->operands())
5367       if (auto *J = dyn_cast<Instruction>(U.get())) {
5368         assert(VectorType::isValidElementType(J->getType()) &&
5369                "Instruction has non-scalar type");
5370         if (canBeScalarized(J))
5371           Worklist.push_back(J);
5372         else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
5375       }
5376 
5377     // Scale the total scalar cost by block probability.
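    // For example, with a reciprocal block probability of 2 (i.e. the block is
    // assumed to execute on half of the iterations), the scalar cost is halved.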
5378     ScalarCost /= getReciprocalPredBlockProb();
5379 
5380     // Compute the discount. A non-negative discount means the vector version
5381     // of the instruction costs more, and scalarizing would be beneficial.
5382     Discount += VectorCost - ScalarCost;
5383     ScalarCosts[I] = ScalarCost;
5384   }
5385 
5386   return Discount;
5387 }
5388 
5389 LoopVectorizationCostModel::VectorizationCostTy
5390 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5391   VectorizationCostTy Cost;
5392 
5393   // For each block.
5394   for (BasicBlock *BB : TheLoop->blocks()) {
5395     VectorizationCostTy BlockCost;
5396 
5397     // For each instruction in the old loop.
5398     for (Instruction &I : BB->instructionsWithoutDebug()) {
5399       // Skip ignored values.
5400       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5401           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5402         continue;
5403 
5404       VectorizationCostTy C = getInstructionCost(&I, VF);
5405 
5406       // Check if we should override the cost.
5407       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5408         C.first = ForceTargetInstructionCost;
5409 
5410       BlockCost.first += C.first;
5411       BlockCost.second |= C.second;
5412       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5413                         << " for VF " << VF << " For instruction: " << I
5414                         << '\n');
5415     }
5416 
5417     // If we are vectorizing a predicated block, it will have been
5418     // if-converted. This means that the block's instructions (aside from
5419     // stores and instructions that may divide by zero) will now be
5420     // unconditionally executed. For the scalar case, we may not always execute
5421     // the predicated block. Thus, scale the block's cost by the probability of
5422     // executing it.
5423     if (VF == 1 && blockNeedsPredication(BB))
5424       BlockCost.first /= getReciprocalPredBlockProb();
5425 
5426     Cost.first += BlockCost.first;
5427     Cost.second |= BlockCost.second;
5428   }
5429 
5430   return Cost;
5431 }
5432 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
5435 ///
5436 /// This SCEV can be sent to the Target in order to estimate the address
5437 /// calculation cost.
5438 static const SCEV *getAddressAccessSCEV(
5439               Value *Ptr,
5440               LoopVectorizationLegality *Legal,
5441               PredicatedScalarEvolution &PSE,
5442               const Loop *TheLoop) {
5443 
5444   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5445   if (!Gep)
5446     return nullptr;
5447 
5448   // We are looking for a gep with all loop invariant indices except for one
5449   // which should be an induction variable.
5450   auto SE = PSE.getSE();
5451   unsigned NumOperands = Gep->getNumOperands();
5452   for (unsigned i = 1; i < NumOperands; ++i) {
5453     Value *Opd = Gep->getOperand(i);
5454     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5455         !Legal->isInductionVariable(Opd))
5456       return nullptr;
5457   }
5458 
  // Now we know we have a GEP of the form ptr, %inv, %ind, %inv. Return the
  // pointer SCEV.
5460   return PSE.getSCEV(Ptr);
5461 }
5462 
5463 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5464   return Legal->hasStride(I->getOperand(0)) ||
5465          Legal->hasStride(I->getOperand(1));
5466 }
5467 
5468 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5469                                                                  unsigned VF) {
5470   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5471   Type *ValTy = getMemInstValueType(I);
5472   auto SE = PSE.getSE();
5473 
5474   unsigned Alignment = getLoadStoreAlignment(I);
5475   unsigned AS = getLoadStoreAddressSpace(I);
5476   Value *Ptr = getLoadStorePointerOperand(I);
5477   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5478 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
5481   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5482 
5483   // Get the cost of the scalar memory instruction and address computation.
5484   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5485 
5486   // Don't pass *I here, since it is scalar but will actually be part of a
5487   // vectorized loop where the user of it is a vectorized instruction.
5488   Cost += VF *
5489           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5490                               AS);
5491 
5492   // Get the overhead of the extractelement and insertelement instructions
5493   // we might create due to scalarization.
5494   Cost += getScalarizationOverhead(I, VF, TTI);
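  // For example, at VF == 4, if the address computation and the scalar memory
  // operation each cost 1, the cost so far is 4 * 1 + 4 * 1 = 8 plus the
  // insert/extract overhead added above.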
5495 
5496   // If we have a predicated store, it may not be executed for each vector
5497   // lane. Scale the cost by the probability of executing the predicated
5498   // block.
5499   if (isPredicatedInst(I)) {
5500     Cost /= getReciprocalPredBlockProb();
5501 
5502     if (useEmulatedMaskMemRefHack(I))
5503       // Artificially setting to a high enough value to practically disable
5504       // vectorization with such operations.
5505       Cost = 3000000;
5506   }
5507 
5508   return Cost;
5509 }
5510 
5511 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5512                                                              unsigned VF) {
5513   Type *ValTy = getMemInstValueType(I);
5514   Type *VectorTy = ToVectorTy(ValTy, VF);
5515   unsigned Alignment = getLoadStoreAlignment(I);
5516   Value *Ptr = getLoadStorePointerOperand(I);
5517   unsigned AS = getLoadStoreAddressSpace(I);
5518   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5519 
5520   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5521          "Stride should be 1 or -1 for consecutive memory access");
5522   unsigned Cost = 0;
5523   if (Legal->isMaskRequired(I))
5524     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5525   else
5526     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5527 
5528   bool Reverse = ConsecutiveStride < 0;
5529   if (Reverse)
5530     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5531   return Cost;
5532 }
5533 
5534 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5535                                                          unsigned VF) {
5536   Type *ValTy = getMemInstValueType(I);
5537   Type *VectorTy = ToVectorTy(ValTy, VF);
5538   unsigned Alignment = getLoadStoreAlignment(I);
5539   unsigned AS = getLoadStoreAddressSpace(I);
5540   if (isa<LoadInst>(I)) {
5541     return TTI.getAddressComputationCost(ValTy) +
5542            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5543            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5544   }
5545   StoreInst *SI = cast<StoreInst>(I);
5546 
5547   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5548   return TTI.getAddressComputationCost(ValTy) +
5549          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
5550          (isLoopInvariantStoreValue ? 0 : TTI.getVectorInstrCost(
5551                                                Instruction::ExtractElement,
5552                                                VectorTy, VF - 1));
5553 }
5554 
5555 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5556                                                           unsigned VF) {
5557   Type *ValTy = getMemInstValueType(I);
5558   Type *VectorTy = ToVectorTy(ValTy, VF);
5559   unsigned Alignment = getLoadStoreAlignment(I);
5560   Value *Ptr = getLoadStorePointerOperand(I);
5561 
5562   return TTI.getAddressComputationCost(VectorTy) +
5563          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5564                                     Legal->isMaskRequired(I), Alignment);
5565 }
5566 
5567 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5568                                                             unsigned VF) {
5569   Type *ValTy = getMemInstValueType(I);
5570   Type *VectorTy = ToVectorTy(ValTy, VF);
5571   unsigned AS = getLoadStoreAddressSpace(I);
5572 
5573   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
5575 
5576   unsigned InterleaveFactor = Group->getFactor();
5577   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
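  // For example, an i32 access at VF == 4 in a group with factor 2 uses a wide
  // vector type of <8 x i32>.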
5578 
5579   // Holds the indices of existing members in an interleaved load group.
5580   // An interleaved store group doesn't need this as it doesn't allow gaps.
5581   SmallVector<unsigned, 4> Indices;
5582   if (isa<LoadInst>(I)) {
5583     for (unsigned i = 0; i < InterleaveFactor; i++)
5584       if (Group->getMember(i))
5585         Indices.push_back(i);
5586   }
5587 
5588   // Calculate the cost of the whole interleaved group.
5589   bool UseMaskForGaps =
5590       Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed;
5591   unsigned Cost = TTI.getInterleavedMemoryOpCost(
5592       I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5593       Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps);
5594 
5595   if (Group->isReverse()) {
5596     // TODO: Add support for reversed masked interleaved access.
5597     assert(!Legal->isMaskRequired(I) &&
5598            "Reverse masked interleaved access not supported.");
5599     Cost += Group->getNumMembers() *
5600             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5601   }
5602   return Cost;
5603 }
5604 
5605 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5606                                                               unsigned VF) {
  // Calculate the scalar cost only. The vectorization cost should already have
  // been calculated at this point.
5609   if (VF == 1) {
5610     Type *ValTy = getMemInstValueType(I);
5611     unsigned Alignment = getLoadStoreAlignment(I);
5612     unsigned AS = getLoadStoreAddressSpace(I);
5613 
5614     return TTI.getAddressComputationCost(ValTy) +
5615            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5616   }
5617   return getWideningCost(I, VF);
5618 }
5619 
5620 LoopVectorizationCostModel::VectorizationCostTy
5621 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5622   // If we know that this instruction will remain uniform, check the cost of
5623   // the scalar version.
5624   if (isUniformAfterVectorization(I, VF))
5625     VF = 1;
5626 
5627   if (VF > 1 && isProfitableToScalarize(I, VF))
5628     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5629 
5630   // Forced scalars do not have any scalarization overhead.
5631   auto ForcedScalar = ForcedScalars.find(VF);
5632   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
5633     auto InstSet = ForcedScalar->second;
5634     if (InstSet.find(I) != InstSet.end())
5635       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5636   }
5637 
5638   Type *VectorTy;
5639   unsigned C = getInstructionCost(I, VF, VectorTy);
5640 
5641   bool TypeNotScalarized =
5642       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5643   return VectorizationCostTy(C, TypeNotScalarized);
5644 }
5645 
5646 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
5647   if (VF == 1)
5648     return;
5649   NumPredStores = 0;
5650   for (BasicBlock *BB : TheLoop->blocks()) {
5651     // For each instruction in the old loop.
5652     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
5654       if (!Ptr)
5655         continue;
5656 
5657       // TODO: We should generate better code and update the cost model for
5658       // predicated uniform stores. Today they are treated as any other
5659       // predicated store (see added test cases in
5660       // invariant-store-vectorization.ll).
5661       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
5662         NumPredStores++;
5663 
5664       if (Legal->isUniform(Ptr) &&
5665           // Conditional loads and stores should be scalarized and predicated.
5666           // isScalarWithPredication cannot be used here since masked
5667           // gather/scatters are not considered scalar with predication.
5668           !Legal->blockNeedsPredication(I.getParent())) {
5669         // TODO: Avoid replicating loads and stores instead of
5670         // relying on instcombine to remove them.
5671         // Load: Scalar load + broadcast
5672         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5673         unsigned Cost = getUniformMemOpCost(&I, VF);
5674         setWideningDecision(&I, VF, CM_Scalarize, Cost);
5675         continue;
5676       }
5677 
5678       // We assume that widening is the best solution when possible.
5679       if (memoryInstructionCanBeWidened(&I, VF)) {
5680         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
5681         int ConsecutiveStride =
5682                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
5683         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5684                "Expected consecutive stride.");
5685         InstWidening Decision =
5686             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5687         setWideningDecision(&I, VF, Decision, Cost);
5688         continue;
5689       }
5690 
5691       // Choose between Interleaving, Gather/Scatter or Scalarization.
5692       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
5693       unsigned NumAccesses = 1;
5694       if (isAccessInterleaved(&I)) {
5695         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
5697 
5698         // Make one decision for the whole group.
5699         if (getWideningDecision(&I, VF) != CM_Unknown)
5700           continue;
5701 
5702         NumAccesses = Group->getNumMembers();
5703         if (interleavedAccessCanBeWidened(&I, VF))
5704           InterleaveCost = getInterleaveGroupCost(&I, VF);
5705       }
5706 
5707       unsigned GatherScatterCost =
5708           isLegalGatherOrScatter(&I)
5709               ? getGatherScatterCost(&I, VF) * NumAccesses
5710               : std::numeric_limits<unsigned>::max();
5711 
5712       unsigned ScalarizationCost =
5713           getMemInstScalarizationCost(&I, VF) * NumAccesses;
5714 
5715       // Choose better solution for the current VF,
5716       // write down this decision and use it during vectorization.
5717       unsigned Cost;
5718       InstWidening Decision;
5719       if (InterleaveCost <= GatherScatterCost &&
5720           InterleaveCost < ScalarizationCost) {
5721         Decision = CM_Interleave;
5722         Cost = InterleaveCost;
5723       } else if (GatherScatterCost < ScalarizationCost) {
5724         Decision = CM_GatherScatter;
5725         Cost = GatherScatterCost;
5726       } else {
5727         Decision = CM_Scalarize;
5728         Cost = ScalarizationCost;
5729       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group also receives the cost, but
      // the cost will actually be assigned to one instruction.
5733       if (auto Group = getInterleavedAccessGroup(&I))
5734         setWideningDecision(Group, VF, Decision, Cost);
5735       else
5736         setWideningDecision(&I, VF, Decision, Cost);
5737     }
5738   }
5739 
5740   // Make sure that any load of address and any other address computation
5741   // remains scalar unless there is gather/scatter support. This avoids
5742   // inevitable extracts into address registers, and also has the benefit of
5743   // activating LSR more, since that pass can't optimize vectorized
5744   // addresses.
5745   if (TTI.prefersVectorizedAddressing())
5746     return;
5747 
5748   // Start with all scalar pointer uses.
5749   SmallPtrSet<Instruction *, 8> AddrDefs;
5750   for (BasicBlock *BB : TheLoop->blocks())
5751     for (Instruction &I : *BB) {
5752       Instruction *PtrDef =
5753         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5754       if (PtrDef && TheLoop->contains(PtrDef) &&
5755           getWideningDecision(&I, VF) != CM_GatherScatter)
5756         AddrDefs.insert(PtrDef);
5757     }
5758 
5759   // Add all instructions used to generate the addresses.
5760   SmallVector<Instruction *, 4> Worklist;
5761   for (auto *I : AddrDefs)
5762     Worklist.push_back(I);
5763   while (!Worklist.empty()) {
5764     Instruction *I = Worklist.pop_back_val();
5765     for (auto &Op : I->operands())
5766       if (auto *InstOp = dyn_cast<Instruction>(Op))
5767         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5768             AddrDefs.insert(InstOp).second)
5769           Worklist.push_back(InstOp);
5770   }
5771 
5772   for (auto *I : AddrDefs) {
5773     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
5776       // if the loaded register is involved in an address computation, it is
5777       // instead changed here when we know this is the case.
5778       InstWidening Decision = getWideningDecision(I, VF);
5779       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5780         // Scalarize a widened load of address.
5781         setWideningDecision(I, VF, CM_Scalarize,
5782                             (VF * getMemoryInstructionCost(I, 1)));
5783       else if (auto Group = getInterleavedAccessGroup(I)) {
5784         // Scalarize an interleave group of address loads.
5785         for (unsigned I = 0; I < Group->getFactor(); ++I) {
5786           if (Instruction *Member = Group->getMember(I))
5787             setWideningDecision(Member, VF, CM_Scalarize,
5788                                 (VF * getMemoryInstructionCost(Member, 1)));
5789         }
5790       }
5791     } else
5792       // Make sure I gets scalarized and a cost estimate without
5793       // scalarization overhead.
5794       ForcedScalars[VF].insert(I);
5795   }
5796 }
5797 
5798 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5799                                                         unsigned VF,
5800                                                         Type *&VectorTy) {
5801   Type *RetTy = I->getType();
5802   if (canTruncateToMinimalBitwidth(I, VF))
5803     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5804   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
5805   auto SE = PSE.getSE();
5806 
5807   // TODO: We need to estimate the cost of intrinsic calls.
5808   switch (I->getOpcode()) {
5809   case Instruction::GetElementPtr:
5810     // We mark this instruction as zero-cost because the cost of GEPs in
5811     // vectorized code depends on whether the corresponding memory instruction
5812     // is scalarized or not. Therefore, we handle GEPs with the memory
5813     // instruction cost.
5814     return 0;
5815   case Instruction::Br: {
5816     // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
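    // For example, at VF == 4 such a branch costs the overhead of extracting
    // the four i1 compare elements plus four scalar branches.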
5819     bool ScalarPredicatedBB = false;
5820     BranchInst *BI = cast<BranchInst>(I);
5821     if (VF > 1 && BI->isConditional() &&
5822         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
5823              PredicatedBBsAfterVectorization.end() ||
5824          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
5825              PredicatedBBsAfterVectorization.end()))
5826       ScalarPredicatedBB = true;
5827 
5828     if (ScalarPredicatedBB) {
5829       // Return cost for branches around scalarized and predicated blocks.
5830       Type *Vec_i1Ty =
5831           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
5832       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
5833               (TTI.getCFInstrCost(Instruction::Br) * VF));
5834     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
5835       // The back-edge branch will remain, as will all scalar branches.
5836       return TTI.getCFInstrCost(Instruction::Br);
5837     else
5838       // This branch will be eliminated by if-conversion.
5839       return 0;
5840     // Note: We currently assume zero cost for an unconditional branch inside
5841     // a predicated block since it will become a fall-through, although we
5842     // may decide in the future to call TTI for all branches.
5843   }
5844   case Instruction::PHI: {
5845     auto *Phi = cast<PHINode>(I);
5846 
5847     // First-order recurrences are replaced by vector shuffles inside the loop.
5848     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
5849     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
5850       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5851                                 VectorTy, VF - 1, VectorType::get(RetTy, 1));
5852 
5853     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
5854     // converted into select instructions. We require N - 1 selects per phi
5855     // node, where N is the number of incoming values.
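    // For example, a phi with three incoming values is lowered to two vector
    // selects.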
5856     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
5857       return (Phi->getNumIncomingValues() - 1) *
5858              TTI.getCmpSelInstrCost(
5859                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
5860                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
5861 
5862     return TTI.getCFInstrCost(Instruction::PHI);
5863   }
5864   case Instruction::UDiv:
5865   case Instruction::SDiv:
5866   case Instruction::URem:
5867   case Instruction::SRem:
5868     // If we have a predicated instruction, it may not be executed for each
5869     // vector lane. Get the scalarization cost and scale this amount by the
5870     // probability of executing the predicated block. If the instruction is not
5871     // predicated, we fall through to the next case.
5872     if (VF > 1 && isScalarWithPredication(I)) {
5873       unsigned Cost = 0;
5874 
5875       // These instructions have a non-void type, so account for the phi nodes
5876       // that we will create. This cost is likely to be zero. The phi node
5877       // cost, if any, should be scaled by the block probability because it
5878       // models a copy at the end of each predicated block.
5879       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
5880 
5881       // The cost of the non-predicated instruction.
5882       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
5883 
5884       // The cost of insertelement and extractelement instructions needed for
5885       // scalarization.
5886       Cost += getScalarizationOverhead(I, VF, TTI);
5887 
5888       // Scale the cost by the probability of executing the predicated blocks.
5889       // This assumes the predicated block for each vector lane is equally
5890       // likely.
5891       return Cost / getReciprocalPredBlockProb();
5892     }
5893     LLVM_FALLTHROUGH;
5894   case Instruction::Add:
5895   case Instruction::FAdd:
5896   case Instruction::Sub:
5897   case Instruction::FSub:
5898   case Instruction::Mul:
5899   case Instruction::FMul:
5900   case Instruction::FDiv:
5901   case Instruction::FRem:
5902   case Instruction::Shl:
5903   case Instruction::LShr:
5904   case Instruction::AShr:
5905   case Instruction::And:
5906   case Instruction::Or:
5907   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
5909     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
5910       return 0;
5911     // Certain instructions can be cheaper to vectorize if they have a constant
5912     // second vector operand. One example of this are shifts on x86.
5913     Value *Op2 = I->getOperand(1);
5914     TargetTransformInfo::OperandValueProperties Op2VP;
5915     TargetTransformInfo::OperandValueKind Op2VK =
5916         TTI.getOperandInfo(Op2, Op2VP);
5917     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
5918       Op2VK = TargetTransformInfo::OK_UniformValue;
5919 
5920     SmallVector<const Value *, 4> Operands(I->operand_values());
5921     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
5922     return N * TTI.getArithmeticInstrCost(
5923                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
5924                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands);
5925   }
5926   case Instruction::Select: {
5927     SelectInst *SI = cast<SelectInst>(I);
5928     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5929     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5930     Type *CondTy = SI->getCondition()->getType();
5931     if (!ScalarCond)
5932       CondTy = VectorType::get(CondTy, VF);
5933 
5934     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
5935   }
5936   case Instruction::ICmp:
5937   case Instruction::FCmp: {
5938     Type *ValTy = I->getOperand(0)->getType();
5939     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5940     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
5941       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
5942     VectorTy = ToVectorTy(ValTy, VF);
5943     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
5944   }
5945   case Instruction::Store:
5946   case Instruction::Load: {
5947     unsigned Width = VF;
5948     if (Width > 1) {
5949       InstWidening Decision = getWideningDecision(I, Width);
5950       assert(Decision != CM_Unknown &&
5951              "CM decision should be taken at this point");
5952       if (Decision == CM_Scalarize)
5953         Width = 1;
5954     }
5955     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
5956     return getMemoryInstructionCost(I, VF);
5957   }
5958   case Instruction::ZExt:
5959   case Instruction::SExt:
5960   case Instruction::FPToUI:
5961   case Instruction::FPToSI:
5962   case Instruction::FPExt:
5963   case Instruction::PtrToInt:
5964   case Instruction::IntToPtr:
5965   case Instruction::SIToFP:
5966   case Instruction::UIToFP:
5967   case Instruction::Trunc:
5968   case Instruction::FPTrunc:
5969   case Instruction::BitCast: {
5970     // We optimize the truncation of induction variables having constant
5971     // integer steps. The cost of these truncations is the same as the scalar
5972     // operation.
5973     if (isOptimizableIVTruncate(I, VF)) {
5974       auto *Trunc = cast<TruncInst>(I);
5975       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
5976                                   Trunc->getSrcTy(), Trunc);
5977     }
5978 
5979     Type *SrcScalarTy = I->getOperand(0)->getType();
5980     Type *SrcVecTy =
5981         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
5982     if (canTruncateToMinimalBitwidth(I, VF)) {
5983       // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
5985       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
5986       //
5987       // Calculate the modified src and dest types.
5988       Type *MinVecTy = VectorTy;
5989       if (I->getOpcode() == Instruction::Trunc) {
5990         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
5991         VectorTy =
5992             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
5993       } else if (I->getOpcode() == Instruction::ZExt ||
5994                  I->getOpcode() == Instruction::SExt) {
5995         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
5996         VectorTy =
5997             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
5998       }
5999     }
6000 
6001     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6002     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6003   }
6004   case Instruction::Call: {
6005     bool NeedToScalarize;
6006     CallInst *CI = cast<CallInst>(I);
6007     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
6008     if (getVectorIntrinsicIDForCall(CI, TLI))
6009       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
6010     return CallCost;
6011   }
6012   default:
6013     // The cost of executing VF copies of the scalar instruction. This opcode
6014     // is unknown. Assume that it is the same as 'mul'.
6015     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6016            getScalarizationOverhead(I, VF, TTI);
6017   } // end of switch.
6018 }
6019 
6020 char LoopVectorize::ID = 0;
6021 
6022 static const char lv_name[] = "Loop Vectorization";
6023 
6024 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6025 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6026 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6027 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6028 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6029 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6030 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6031 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6032 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6033 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6034 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6035 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6036 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6037 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6038 
6039 namespace llvm {
6040 
6041 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6042                               bool VectorizeOnlyWhenForced) {
6043   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6044 }
6045 
6046 } // end namespace llvm
6047 
6048 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6049   // Check if the pointer operand of a load or store instruction is
6050   // consecutive.
6051   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6052     return Legal->isConsecutivePtr(Ptr);
6053   return false;
6054 }
6055 
6056 void LoopVectorizationCostModel::collectValuesToIgnore() {
6057   // Ignore ephemeral values.
6058   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6059 
6060   // Ignore type-promoting instructions we identified during reduction
6061   // detection.
6062   for (auto &Reduction : *Legal->getReductionVars()) {
6063     RecurrenceDescriptor &RedDes = Reduction.second;
6064     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6065     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6066   }
6067   // Ignore type-casting instructions we identified during induction
6068   // detection.
6069   for (auto &Induction : *Legal->getInductionVars()) {
6070     InductionDescriptor &IndDes = Induction.second;
6071     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6072     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6073   }
6074 }
6075 
6076 VectorizationFactor
6077 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize,
6078                                                 unsigned UserVF) {
6079   // Width 1 means no vectorization, cost 0 means uncomputed cost.
6080   const VectorizationFactor NoVectorization = {1U, 0U};
6081 
  // Outer loop handling: Outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
6084   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6085   // the vectorization pipeline.
6086   if (!OrigLoop->empty()) {
6087     // TODO: If UserVF is not provided, we set UserVF to 4 for stress testing.
6088     // This won't be necessary when UserVF is not required in the VPlan-native
6089     // path.
6090     if (VPlanBuildStressTest && !UserVF)
6091       UserVF = 4;
6092 
6093     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6094     assert(UserVF && "Expected UserVF for outer loop vectorization.");
6095     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6096     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6097     buildVPlans(UserVF, UserVF);
6098 
6099     // For VPlan build stress testing, we bail out after VPlan construction.
6100     if (VPlanBuildStressTest)
6101       return NoVectorization;
6102 
6103     return {UserVF, 0};
6104   }
6105 
6106   LLVM_DEBUG(
6107       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6108                 "VPlan-native path.\n");
6109   return NoVectorization;
6110 }
6111 
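// Plan vectorization of an inner loop: compute the maximum feasible VF, honor
// a user-provided VF if one is given, and otherwise build VPlans for all
// candidate VFs and let the cost model pick the most profitable one.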
6112 VectorizationFactor
6113 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
6114   assert(OrigLoop->empty() && "Inner loop expected.");
6115   // Width 1 means no vectorization, cost 0 means uncomputed cost.
6116   const VectorizationFactor NoVectorization = {1U, 0U};
6117   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
6118   if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
6119     return NoVectorization;
6120 
6121   // Invalidate interleave groups if all blocks of the loop will be predicated.
6122   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6123       !useMaskedInterleavedAccesses(*TTI)) {
6124     LLVM_DEBUG(
6125         dbgs()
6126         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6127            "which requires masked-interleaved support.\n");
6128     CM.InterleaveInfo.reset();
6129   }
6130 
6131   if (UserVF) {
6132     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6133     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6134     // Collect the instructions (and their associated costs) that will be more
6135     // profitable to scalarize.
6136     CM.selectUserVectorizationFactor(UserVF);
6137     buildVPlansWithVPRecipes(UserVF, UserVF);
6138     LLVM_DEBUG(printPlans(dbgs()));
6139     return {UserVF, 0};
6140   }
6141 
6142   unsigned MaxVF = MaybeMaxVF.getValue();
6143   assert(MaxVF != 0 && "MaxVF is zero.");
6144 
6145   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6146     // Collect Uniform and Scalar instructions after vectorization with VF.
6147     CM.collectUniformsAndScalars(VF);
6148 
6149     // Collect the instructions (and their associated costs) that will be more
6150     // profitable to scalarize.
6151     if (VF > 1)
6152       CM.collectInstsToScalarize(VF);
6153   }
6154 
6155   buildVPlansWithVPRecipes(1, MaxVF);
6156   LLVM_DEBUG(printPlans(dbgs()));
6157   if (MaxVF == 1)
6158     return NoVectorization;
6159 
6160   // Select the optimal vectorization factor.
6161   return CM.selectVectorizationFactor(MaxVF);
6162 }
6163 
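// Record the VF and UF chosen for code generation and discard all candidate
// VPlans that cannot handle the chosen VF; exactly one plan must remain.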
6164 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6165   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6166                     << '\n');
6167   BestVF = VF;
6168   BestUF = UF;
6169 
6170   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6171     return !Plan->hasVF(VF);
6172   });
6173   assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6174 }
6175 
6176 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6177                                            DominatorTree *DT) {
6178   // Perform the actual loop transformation.
6179 
6180   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6181   VPCallbackILV CallbackILV(ILV);
6182 
6183   VPTransformState State{BestVF, BestUF,      LI,
6184                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6185                          &ILV,   CallbackILV};
6186   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6187   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6188 
6189   //===------------------------------------------------===//
6190   //
6191   // Notice: any optimization or new instruction that goes
6192   // into the code below should also be implemented in
6193   // the cost-model.
6194   //
6195   //===------------------------------------------------===//
6196 
6197   // 2. Copy and widen instructions from the old loop into the new loop.
6198   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6199   VPlans.front()->execute(&State);
6200 
6201   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6202   //    predication, updating analyses.
6203   ILV.fixVectorizedLoop();
6204 }
6205 
6206 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6207     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6208   BasicBlock *Latch = OrigLoop->getLoopLatch();
6209 
6210   // We create new control-flow for the vectorized loop, so the original
6211   // condition will be dead after vectorization if it's only used by the
6212   // branch.
6213   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6214   if (Cmp && Cmp->hasOneUse())
6215     DeadInstructions.insert(Cmp);
6216 
6217   // We create new "steps" for induction variable updates to which the original
6218   // induction variables map. An original update instruction will be dead if
6219   // all its users except the induction variable are dead.
6220   for (auto &Induction : *Legal->getInductionVars()) {
6221     PHINode *Ind = Induction.first;
6222     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6223     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6224           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6225                                  DeadInstructions.end();
6226         }))
6227       DeadInstructions.insert(IndUpdate);
6228 
6229     // We record as "Dead" also the type-casting instructions we had identified
6230     // during induction analysis. We don't need any handling for them in the
6231     // vectorized loop because we have proven that, under a proper runtime
6232     // test guarding the vectorized loop, the value of the phi, and the casted
6233     // value of the phi, are the same. The last instruction in this casting chain
6234     // will get its scalar/vector/widened def from the scalar/vector/widened def
6235     // of the respective phi node. Any other casts in the induction def-use chain
6236     // have no other uses outside the phi update chain, and will be ignored.
6237     InductionDescriptor &IndDes = Induction.second;
6238     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6239     DeadInstructions.insert(Casts.begin(), Casts.end());
6240   }
6241 }
6242 
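// When interleaving without vectorizing (VF == 1) every value stays scalar,
// so reversing and broadcasting are no-ops and a "step vector" degenerates
// into a simple scalar addition.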
6243 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6244 
6245 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6246 
6247 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6248                                         Instruction::BinaryOps BinOp) {
6249   // When unrolling and the VF is 1, we only need to add a simple scalar.
6250   Type *Ty = Val->getType();
6251   assert(!Ty->isVectorTy() && "Val must be a scalar");
6252 
6253   if (Ty->isFloatingPointTy()) {
6254     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6255 
6256     // Floating point operations had to be 'fast' to enable the unrolling.
6257     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6258     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6259   }
6260   Constant *C = ConstantInt::get(Ty, StartIdx);
6261   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6262 }
6263 
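// Add llvm.loop.unroll.runtime.disable metadata to \p L, preserving existing
// loop-ID operands, unless unroll-disabling metadata is already present.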
6264 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6265   SmallVector<Metadata *, 4> MDs;
6266   // Reserve first location for self reference to the LoopID metadata node.
6267   MDs.push_back(nullptr);
6268   bool IsUnrollMetadata = false;
6269   MDNode *LoopID = L->getLoopID();
6270   if (LoopID) {
6271     // First find existing loop unrolling disable metadata.
6272     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6273       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6274       if (MD) {
6275         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6276         IsUnrollMetadata =
6277             S && S->getString().startswith("llvm.loop.unroll.disable");
6278       }
6279       MDs.push_back(LoopID->getOperand(i));
6280     }
6281   }
6282 
6283   if (!IsUnrollMetadata) {
6284     // Add runtime unroll disable metadata.
6285     LLVMContext &Context = L->getHeader()->getContext();
6286     SmallVector<Metadata *, 1> DisableOperands;
6287     DisableOperands.push_back(
6288         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6289     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6290     MDs.push_back(DisableNode);
6291     MDNode *NewLoopID = MDNode::get(Context, MDs);
6292     // Set operand 0 to refer to the loop id itself.
6293     NewLoopID->replaceOperandWith(0, NewLoopID);
6294     L->setLoopID(NewLoopID);
6295   }
6296 }
6297 
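// Evaluate \p Predicate on the powers-of-two VFs in \p Range, clamp Range.End
// at the first VF whose answer differs from that of Range.Start, and return
// the decision taken at Range.Start.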
6298 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6299     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6300   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6301   bool PredicateAtRangeStart = Predicate(Range.Start);
6302 
6303   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6304     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6305       Range.End = TmpVF;
6306       break;
6307     }
6308 
6309   return PredicateAtRangeStart;
6310 }
6311 
6312 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6313 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6314 /// of VF's starting at a given VF and extending it as much as possible. Each
6315 /// vectorization decision can potentially shorten this sub-range during
6316 /// buildVPlan().
6317 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6318   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6319     VFRange SubRange = {VF, MaxVF + 1};
6320     VPlans.push_back(buildVPlan(SubRange));
6321     VF = SubRange.End;
6322   }
6323 }
6324 
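// Compute (and cache) the mask for the edge from Src to Dst: the source
// block's mask combined with the branch condition, negated when Dst is the
// false successor.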
6325 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6326                                          VPlanPtr &Plan) {
6327   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6328 
6329   // Look for cached value.
6330   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6331   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6332   if (ECEntryIt != EdgeMaskCache.end())
6333     return ECEntryIt->second;
6334 
6335   VPValue *SrcMask = createBlockInMask(Src, Plan);
6336 
6337   // The terminator has to be a branch inst!
6338   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6339   assert(BI && "Unexpected terminator found");
6340 
6341   if (!BI->isConditional())
6342     return EdgeMaskCache[Edge] = SrcMask;
6343 
6344   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6345   assert(EdgeMask && "No Edge Mask found for condition");
6346 
6347   if (BI->getSuccessor(0) != Dst)
6348     EdgeMask = Builder.createNot(EdgeMask);
6349 
6350   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6351     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6352 
6353   return EdgeMaskCache[Edge] = EdgeMask;
6354 }
6355 
6356 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6357   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6358 
6359   // Look for cached value.
6360   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6361   if (BCEntryIt != BlockMaskCache.end())
6362     return BCEntryIt->second;
6363 
6364   // All-one mask is modelled as no-mask following the convention for masked
6365   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6366   VPValue *BlockMask = nullptr;
6367 
6368   if (OrigLoop->getHeader() == BB) {
6369     if (!CM.blockNeedsPredication(BB))
6370       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6371 
6372     // Introduce the early-exit compare IV <= BTC to form header block mask.
6373     // This is used instead of IV < TC because TC may wrap, unlike BTC.
6374     VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction());
6375     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6376     BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6377     return BlockMaskCache[BB] = BlockMask;
6378   }
6379 
6380   // This is the block mask. We OR all incoming edges.
6381   for (auto *Predecessor : predecessors(BB)) {
6382     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6383     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6384       return BlockMaskCache[BB] = EdgeMask;
6385 
6386     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6387       BlockMask = EdgeMask;
6388       continue;
6389     }
6390 
6391     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6392   }
6393 
6394   return BlockMaskCache[BB] = BlockMask;
6395 }
6396 
6397 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I,
6398                                                            VFRange &Range,
6399                                                            VPlanPtr &Plan) {
6400   const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I);
6401   if (!IG)
6402     return nullptr;
6403 
6404   // Now check if IG is relevant for VF's in the given range.
6405   auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
6406     return [=](unsigned VF) -> bool {
6407       return (VF >= 2 && // Query is illegal for VF == 1
6408               CM.getWideningDecision(I, VF) ==
6409                   LoopVectorizationCostModel::CM_Interleave);
6410     };
6411   };
6412   if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range))
6413     return nullptr;
6414 
6415   // I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
6416   // range. If it's the primary member of the IG, construct a VPInterleaveRecipe.
6417   // Otherwise, it's an adjunct member of the IG, do not construct any Recipe.
6418   assert(I == IG->getInsertPos() &&
6419          "Generating a recipe for an adjunct member of an interleave group");
6420 
6421   VPValue *Mask = nullptr;
6422   if (Legal->isMaskRequired(I))
6423     Mask = createBlockInMask(I->getParent(), Plan);
6424 
6425   return new VPInterleaveRecipe(IG, Mask);
6426 }
6427 
6428 VPWidenMemoryInstructionRecipe *
6429 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6430                                   VPlanPtr &Plan) {
6431   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
6432     return nullptr;
6433 
6434   auto willWiden = [&](unsigned VF) -> bool {
6435     if (VF == 1)
6436       return false;
6437     if (CM.isScalarAfterVectorization(I, VF) ||
6438         CM.isProfitableToScalarize(I, VF))
6439       return false;
6440     LoopVectorizationCostModel::InstWidening Decision =
6441         CM.getWideningDecision(I, VF);
6442     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6443            "CM decision should be taken at this point.");
6444     assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
6445            "Interleave memory opportunity should be caught earlier.");
6446     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6447   };
6448 
6449   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6450     return nullptr;
6451 
6452   VPValue *Mask = nullptr;
6453   if (Legal->isMaskRequired(I))
6454     Mask = createBlockInMask(I->getParent(), Plan);
6455 
6456   return new VPWidenMemoryInstructionRecipe(*I, Mask);
6457 }
6458 
6459 VPWidenIntOrFpInductionRecipe *
6460 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) {
6461   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
6462     // Check if this is an integer or fp induction. If so, build the recipe that
6463     // produces its scalar and vector values.
6464     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
6465     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6466         II.getKind() == InductionDescriptor::IK_FpInduction)
6467       return new VPWidenIntOrFpInductionRecipe(Phi);
6468 
6469     return nullptr;
6470   }
6471 
6472   // Optimize the special case where the source is a constant integer
6473   // induction variable. Notice that we can only optimize the 'trunc' case
6474   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6475   // (c) other casts depend on pointer size.
6476 
6477   // Determine whether \p K is a truncation based on an induction variable that
6478   // can be optimized.
6479   auto isOptimizableIVTruncate =
6480       [&](Instruction *K) -> std::function<bool(unsigned)> {
6481     return
6482         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6483   };
6484 
6485   if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange(
6486                                isOptimizableIVTruncate(I), Range))
6487     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6488                                              cast<TruncInst>(I));
6489   return nullptr;
6490 }
6491 
6492 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) {
6493   PHINode *Phi = dyn_cast<PHINode>(I);
6494   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
6495     return nullptr;
6496 
6497   // We know that all PHIs in non-header blocks are converted into selects, so
6498   // we don't have to worry about the insertion order and we can just use the
6499   // builder. At this point we generate the predication tree. There may be
6500   // duplications since this is a simple recursive scan, but future
6501   // optimizations will clean it up.
6502 
6503   SmallVector<VPValue *, 2> Masks;
6504   unsigned NumIncoming = Phi->getNumIncomingValues();
6505   for (unsigned In = 0; In < NumIncoming; In++) {
6506     VPValue *EdgeMask =
6507       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6508     assert((EdgeMask || NumIncoming == 1) &&
6509            "Multiple predecessors with one having a full mask");
6510     if (EdgeMask)
6511       Masks.push_back(EdgeMask);
6512   }
6513   return new VPBlendRecipe(Phi, Masks);
6514 }
6515 
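// Try to widen \p I with a VPWidenRecipe. Bail out if the instruction must be
// predicated, its opcode cannot be widened, or the cost model prefers to
// scalarize it for the VFs in \p Range; otherwise append it to the last widen
// recipe in \p VPBB when possible, or start a new one.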
6516 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
6517                                  VFRange &Range) {
6518 
6519   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6520       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6521 
6522   if (IsPredicated)
6523     return false;
6524 
6525   auto IsVectorizableOpcode = [](unsigned Opcode) {
6526     switch (Opcode) {
6527     case Instruction::Add:
6528     case Instruction::And:
6529     case Instruction::AShr:
6530     case Instruction::BitCast:
6531     case Instruction::Br:
6532     case Instruction::Call:
6533     case Instruction::FAdd:
6534     case Instruction::FCmp:
6535     case Instruction::FDiv:
6536     case Instruction::FMul:
6537     case Instruction::FPExt:
6538     case Instruction::FPToSI:
6539     case Instruction::FPToUI:
6540     case Instruction::FPTrunc:
6541     case Instruction::FRem:
6542     case Instruction::FSub:
6543     case Instruction::GetElementPtr:
6544     case Instruction::ICmp:
6545     case Instruction::IntToPtr:
6546     case Instruction::Load:
6547     case Instruction::LShr:
6548     case Instruction::Mul:
6549     case Instruction::Or:
6550     case Instruction::PHI:
6551     case Instruction::PtrToInt:
6552     case Instruction::SDiv:
6553     case Instruction::Select:
6554     case Instruction::SExt:
6555     case Instruction::Shl:
6556     case Instruction::SIToFP:
6557     case Instruction::SRem:
6558     case Instruction::Store:
6559     case Instruction::Sub:
6560     case Instruction::Trunc:
6561     case Instruction::UDiv:
6562     case Instruction::UIToFP:
6563     case Instruction::URem:
6564     case Instruction::Xor:
6565     case Instruction::ZExt:
6566       return true;
6567     }
6568     return false;
6569   };
6570 
6571   if (!IsVectorizableOpcode(I->getOpcode()))
6572     return false;
6573 
6574   if (CallInst *CI = dyn_cast<CallInst>(I)) {
6575     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6576     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6577                ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6578       return false;
6579   }
6580 
6581   auto willWiden = [&](unsigned VF) -> bool {
6582     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
6583                              CM.isProfitableToScalarize(I, VF)))
6584       return false;
6585     if (CallInst *CI = dyn_cast<CallInst>(I)) {
6586       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6587       // The following case may be scalarized depending on the VF.
6588       // The flag shows whether we use an intrinsic or a usual call for the
6589       // vectorized version of the instruction.
6590       // Is it beneficial to perform the intrinsic call compared to the lib call?
6591       bool NeedToScalarize;
6592       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
6593       bool UseVectorIntrinsic =
6594           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
6595       return UseVectorIntrinsic || !NeedToScalarize;
6596     }
6597     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
6598       assert(CM.getWideningDecision(I, VF) ==
6599                  LoopVectorizationCostModel::CM_Scalarize &&
6600              "Memory widening decisions should have been taken care of by now");
6601       return false;
6602     }
6603     return true;
6604   };
6605 
6606   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6607     return false;
6608 
6609   // Success: widen this instruction. We optimize the common case where
6610   // consecutive instructions can be represented by a single recipe.
6611   if (!VPBB->empty()) {
6612     VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
6613     if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
6614       return true;
6615   }
6616 
6617   VPBB->appendRecipe(new VPWidenRecipe(I));
6618   return true;
6619 }
6620 
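// Build a VPReplicateRecipe for \p I. A non-predicated recipe is appended to
// \p VPBB directly; a predicated one is wrapped in an if-then replicate
// region, and a fresh successor VPBasicBlock is returned for subsequent
// recipes.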
6621 VPBasicBlock *VPRecipeBuilder::handleReplication(
6622     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
6623     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
6624     VPlanPtr &Plan) {
6625   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
6626       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
6627       Range);
6628 
6629   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6630       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6631 
6632   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
6633 
6634   // Find if I uses a predicated instruction. If so, it will use its scalar
6635   // value. Avoid hoisting the insert-element which packs the scalar value into
6636   // a vector value, as that happens iff all users use the vector value.
6637   for (auto &Op : I->operands())
6638     if (auto *PredInst = dyn_cast<Instruction>(Op))
6639       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
6640         PredInst2Recipe[PredInst]->setAlsoPack(false);
6641 
6642   // Finalize the recipe for Instr, first if it is not predicated.
6643   if (!IsPredicated) {
6644     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6645     VPBB->appendRecipe(Recipe);
6646     return VPBB;
6647   }
6648   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6649   assert(VPBB->getSuccessors().empty() &&
6650          "VPBB has successors when handling predicated replication.");
6651   // Record predicated instructions for above packing optimizations.
6652   PredInst2Recipe[I] = Recipe;
6653   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
6654   VPBlockUtils::insertBlockAfter(Region, VPBB);
6655   auto *RegSucc = new VPBasicBlock();
6656   VPBlockUtils::insertBlockAfter(RegSucc, Region);
6657   return RegSucc;
6658 }
6659 
6660 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
6661                                                       VPRecipeBase *PredRecipe,
6662                                                       VPlanPtr &Plan) {
6663   // Instructions marked for predication are replicated and placed under an
6664   // if-then construct to prevent side-effects.
6665 
6666   // Generate recipes to compute the block mask for this region.
6667   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
6668 
6669   // Build the triangular if-then region.
6670   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
6671   assert(Instr->getParent() && "Predicated instruction not in any basic block");
6672   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
6673   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
6674   auto *PHIRecipe =
6675       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
6676   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
6677   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
6678   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
6679 
6680   // Note: first set Entry as region entry and then connect successors starting
6681   // from it in order, to propagate the "parent" of each VPBasicBlock.
6682   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
6683   VPBlockUtils::connectBlocks(Pred, Exit);
6684 
6685   return Region;
6686 }
6687 
6688 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range,
6689                                         VPlanPtr &Plan, VPBasicBlock *VPBB) {
6690   VPRecipeBase *Recipe = nullptr;
6691   // Check if Instr should belong to an interleave memory recipe, or already
6692   // does. In the latter case Instr is irrelevant.
6693   if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) {
6694     VPBB->appendRecipe(Recipe);
6695     return true;
6696   }
6697 
6698   // Check if Instr is a memory operation that should be widened.
6699   if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
6700     VPBB->appendRecipe(Recipe);
6701     return true;
6702   }
6703 
6704   // Check if Instr should form some PHI recipe.
6705   if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
6706     VPBB->appendRecipe(Recipe);
6707     return true;
6708   }
6709   if ((Recipe = tryToBlend(Instr, Plan))) {
6710     VPBB->appendRecipe(Recipe);
6711     return true;
6712   }
6713   if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
6714     VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
6715     return true;
6716   }
6717 
6718   // Check if Instr is to be widened by a general VPWidenRecipe, after
6719   // having first checked for specific widening recipes that deal with
6720   // Interleave Groups, Inductions and Phi nodes.
6721   if (tryToWiden(Instr, VPBB, Range))
6722     return true;
6723 
6724   return false;
6725 }
6726 
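// Build VPlans with VPRecipes for the full range of feasible VFs {MinVF, ...,
// MaxVF}, splitting it into sub-ranges over which the vectorization decisions
// stay the same.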
6727 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
6728                                                         unsigned MaxVF) {
6729   assert(OrigLoop->empty() && "Inner loop expected.");
6730 
6731   // Collect conditions feeding internal conditional branches; they need to be
6732   // represented in VPlan for it to model masking.
6733   SmallPtrSet<Value *, 1> NeedDef;
6734 
6735   auto *Latch = OrigLoop->getLoopLatch();
6736   for (BasicBlock *BB : OrigLoop->blocks()) {
6737     if (BB == Latch)
6738       continue;
6739     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
6740     if (Branch && Branch->isConditional())
6741       NeedDef.insert(Branch->getCondition());
6742   }
6743 
6744   // If the tail is to be folded by masking, the primary induction variable
6745   // needs to be represented in VPlan for it to model early-exit masking.
6746   if (CM.foldTailByMasking())
6747     NeedDef.insert(Legal->getPrimaryInduction());
6748 
6749   // Collect instructions from the original loop that will become trivially dead
6750   // in the vectorized loop. We don't need to vectorize these instructions. For
6751   // example, original induction update instructions can become dead because we
6752   // separately emit induction "steps" when generating code for the new loop.
6753   // Similarly, we create a new latch condition when setting up the structure
6754   // of the new loop, so the old one can become dead.
6755   SmallPtrSet<Instruction *, 4> DeadInstructions;
6756   collectTriviallyDeadInstructions(DeadInstructions);
6757 
6758   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6759     VFRange SubRange = {VF, MaxVF + 1};
6760     VPlans.push_back(
6761         buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions));
6762     VF = SubRange.End;
6763   }
6764 }
6765 
6766 LoopVectorizationPlanner::VPlanPtr
6767 LoopVectorizationPlanner::buildVPlanWithVPRecipes(
6768     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
6769     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6770   // Hold a mapping from predicated instructions to their recipes, in order to
6771   // fix their AlsoPack behavior if a user is determined to replicate and use a
6772   // scalar instead of a vector value.
6773   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
6774 
6775   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
6776   DenseMap<Instruction *, Instruction *> SinkAfterInverse;
6777 
6778   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
6779   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
6780   auto Plan = llvm::make_unique<VPlan>(VPBB);
6781 
6782   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder);
6783   // Represent values that will have defs inside VPlan.
6784   for (Value *V : NeedDef)
6785     Plan->addVPValue(V);
6786 
6787   // Scan the body of the loop in a topological order to visit each basic block
6788   // after having visited its predecessor basic blocks.
6789   LoopBlocksDFS DFS(OrigLoop);
6790   DFS.perform(LI);
6791 
6792   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6793     // Relevant instructions from basic block BB will be grouped into VPRecipe
6794     // ingredients and fill a new VPBasicBlock.
6795     unsigned VPBBsForBB = 0;
6796     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
6797     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
6798     VPBB = FirstVPBBForBB;
6799     Builder.setInsertPoint(VPBB);
6800 
6801     std::vector<Instruction *> Ingredients;
6802 
6803     // Organize the ingredients to vectorize from current basic block in the
6804     // right order.
6805     for (Instruction &I : BB->instructionsWithoutDebug()) {
6806       Instruction *Instr = &I;
6807 
6808       // First filter out irrelevant instructions, to ensure no recipes are
6809       // built for them.
6810       if (isa<BranchInst>(Instr) ||
6811           DeadInstructions.find(Instr) != DeadInstructions.end())
6812         continue;
6813 
6814       // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct
6815       // member of the IG, do not construct any Recipe for it.
6816       const InterleaveGroup<Instruction> *IG =
6817           CM.getInterleavedAccessGroup(Instr);
6818       if (IG && Instr != IG->getInsertPos() &&
6819           Range.Start >= 2 && // Query is illegal for VF == 1
6820           CM.getWideningDecision(Instr, Range.Start) ==
6821               LoopVectorizationCostModel::CM_Interleave) {
6822         auto SinkCandidate = SinkAfterInverse.find(Instr);
6823         if (SinkCandidate != SinkAfterInverse.end())
6824           Ingredients.push_back(SinkCandidate->second);
6825         continue;
6826       }
6827 
6828       // Move instructions to handle first-order recurrences, step 1: avoid
6829       // handling this instruction until after we've handled the instruction it
6830       // should follow.
6831       auto SAIt = SinkAfter.find(Instr);
6832       if (SAIt != SinkAfter.end()) {
6833         LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after"
6834                           << *SAIt->second
6835                           << " to vectorize a 1st order recurrence.\n");
6836         SinkAfterInverse[SAIt->second] = Instr;
6837         continue;
6838       }
6839 
6840       Ingredients.push_back(Instr);
6841 
6842       // Move instructions to handle first-order recurrences, step 2: push the
6843       // instruction to be sunk at its insertion point.
6844       auto SAInvIt = SinkAfterInverse.find(Instr);
6845       if (SAInvIt != SinkAfterInverse.end())
6846         Ingredients.push_back(SAInvIt->second);
6847     }
6848 
6849     // Introduce each ingredient into VPlan.
6850     for (Instruction *Instr : Ingredients) {
6851       if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB))
6852         continue;
6853 
6854       // Otherwise, if all widening options failed, the instruction is to be
6855       // replicated. This may create a successor for VPBB.
6856       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
6857           Instr, Range, VPBB, PredInst2Recipe, Plan);
6858       if (NextVPBB != VPBB) {
6859         VPBB = NextVPBB;
6860         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
6861                                     : "");
6862       }
6863     }
6864   }
6865 
6866   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
6867   // may also be empty, such as the last one (VPBB), reflecting original
6868   // basic blocks with no recipes.
6869   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
6870   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
6871   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
6872   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
6873   delete PreEntry;
6874 
6875   std::string PlanName;
6876   raw_string_ostream RSO(PlanName);
6877   unsigned VF = Range.Start;
6878   Plan->addVF(VF);
6879   RSO << "Initial VPlan for VF={" << VF;
6880   for (VF *= 2; VF < Range.End; VF *= 2) {
6881     Plan->addVF(VF);
6882     RSO << "," << VF;
6883   }
6884   RSO << "},UF>=1";
6885   RSO.flush();
6886   Plan->setName(PlanName);
6887 
6888   return Plan;
6889 }
6890 
6891 LoopVectorizationPlanner::VPlanPtr
6892 LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
6893   // Outer loop handling: outer loops may require CFG and instruction level
6894   // transformations before even evaluating whether vectorization is profitable.
6895   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6896   // the vectorization pipeline.
6897   assert(!OrigLoop->empty());
6898   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6899 
6900   // Create new empty VPlan
6901   auto Plan = llvm::make_unique<VPlan>();
6902 
6903   // Build hierarchical CFG
6904   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
6905   HCFGBuilder.buildHierarchicalCFG();
6906 
6907   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
6908     Plan->addVF(VF);
6909 
6910   if (EnableVPlanPredication) {
6911     VPlanPredicator VPP(*Plan);
6912     VPP.predicate();
6913 
6914     // Avoid running the transformation to recipes until masked code generation
6915     // in the VPlan-native path is in place.
6916     return Plan;
6917   }
6918 
6919   SmallPtrSet<Instruction *, 1> DeadInstructions;
6920   VPlanHCFGTransforms::VPInstructionsToVPRecipes(
6921       Plan, Legal->getInductionVars(), DeadInstructions);
6922 
6923   return Plan;
6924 }
6925 
6926 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
6927     Value *V, unsigned Part) {
6928   return ILV.getOrCreateVectorValue(V, Part);
6929 }
6930 
6931 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
6932   O << " +\n"
6933     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
6934   IG->getInsertPos()->printAsOperand(O, false);
6935   if (User) {
6936     O << ", ";
6937     User->getOperand(0)->printAsOperand(O);
6938   }
6939   O << "\\l\"";
6940   for (unsigned i = 0; i < IG->getFactor(); ++i)
6941     if (Instruction *I = IG->getMember(i))
6942       O << " +\n"
6943         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
6944 }
6945 
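// Widen each ingredient grouped into this recipe by delegating to the
// InnerLoopVectorizer.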
6946 void VPWidenRecipe::execute(VPTransformState &State) {
6947   for (auto &Instr : make_range(Begin, End))
6948     State.ILV->widenInstruction(Instr);
6949 }
6950 
6951 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
6952   assert(!State.Instance && "Int or FP induction being replicated.");
6953   State.ILV->widenIntOrFpInduction(IV, Trunc);
6954 }
6955 
6956 void VPWidenPHIRecipe::execute(VPTransformState &State) {
6957   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
6958 }
6959 
6960 void VPBlendRecipe::execute(VPTransformState &State) {
6961   State.ILV->setDebugLocFromInst(State.Builder, Phi);
6962   // We know that all PHIs in non-header blocks are converted into
6963   // selects, so we don't have to worry about the insertion order and we
6964   // can just use the builder.
6965   // At this point we generate the predication tree. There may be
6966   // duplications since this is a simple recursive scan, but future
6967   // optimizations will clean it up.
6968 
6969   unsigned NumIncoming = Phi->getNumIncomingValues();
6970 
6971   assert((User || NumIncoming == 1) &&
6972          "Multiple predecessors with one having a full mask");
6973   // Generate a sequence of selects of the form:
6974   // SELECT(Mask3, In3,
6975   //      SELECT(Mask2, In2,
6976   //                   ( ...)))
6977   InnerLoopVectorizer::VectorParts Entry(State.UF);
6978   for (unsigned In = 0; In < NumIncoming; ++In) {
6979     for (unsigned Part = 0; Part < State.UF; ++Part) {
6980       // We might have single edge PHIs (blocks) - use an identity
6981       // 'select' for the first PHI operand.
6982       Value *In0 =
6983           State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
6984       if (In == 0)
6985         Entry[Part] = In0; // Initialize with the first incoming value.
6986       else {
6987         // Select between the current value and the previous incoming edge
6988         // based on the incoming mask.
6989         Value *Cond = State.get(User->getOperand(In), Part);
6990         Entry[Part] =
6991             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
6992       }
6993     }
6994   }
6995   for (unsigned Part = 0; Part < State.UF; ++Part)
6996     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
6997 }
6998 
6999 void VPInterleaveRecipe::execute(VPTransformState &State) {
7000   assert(!State.Instance && "Interleave group being replicated.");
7001   if (!User)
7002     return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
7003 
7004   // Last (and currently only) operand is a mask.
7005   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
7006   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
7007   for (unsigned Part = 0; Part < State.UF; ++Part)
7008     MaskValues[Part] = State.get(Mask, Part);
7009   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
7010 }
7011 
7012 void VPReplicateRecipe::execute(VPTransformState &State) {
7013   if (State.Instance) { // Generate a single instance.
7014     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7015     // Insert scalar instance packing it into a vector.
7016     if (AlsoPack && State.VF > 1) {
7017       // If we're constructing lane 0, initialize to start from undef.
7018       if (State.Instance->Lane == 0) {
7019         Value *Undef =
7020             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7021         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7022       }
7023       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7024     }
7025     return;
7026   }
7027 
7028   // Generate scalar instances for all VF lanes of all UF parts, unless the
7029   // instruction is uniform, in which case generate only the first lane for each
7030   // of the UF parts.
7031   unsigned EndLane = IsUniform ? 1 : State.VF;
7032   for (unsigned Part = 0; Part < State.UF; ++Part)
7033     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7034       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7035 }
7036 
7037 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7038   assert(State.Instance && "Branch on Mask works only on single instance.");
7039 
7040   unsigned Part = State.Instance->Part;
7041   unsigned Lane = State.Instance->Lane;
7042 
7043   Value *ConditionBit = nullptr;
7044   if (!User) // Block in mask is all-one.
7045     ConditionBit = State.Builder.getTrue();
7046   else {
7047     VPValue *BlockInMask = User->getOperand(0);
7048     ConditionBit = State.get(BlockInMask, Part);
7049     if (ConditionBit->getType()->isVectorTy())
7050       ConditionBit = State.Builder.CreateExtractElement(
7051           ConditionBit, State.Builder.getInt32(Lane));
7052   }
7053 
7054   // Replace the temporary unreachable terminator with a new conditional branch,
7055   // whose two destinations will be set later when they are created.
7056   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7057   assert(isa<UnreachableInst>(CurrentTerminator) &&
7058          "Expected to replace unreachable terminator with conditional branch.");
7059   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7060   CondBr->setSuccessor(0, nullptr);
7061   ReplaceInstWithInst(CurrentTerminator, CondBr);
7062 }
7063 
7064 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7065   assert(State.Instance && "Predicated instruction PHI works per instance.");
7066   Instruction *ScalarPredInst = cast<Instruction>(
7067       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7068   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7069   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7070   assert(PredicatingBB && "Predicated block has no single predecessor.");
7071 
7072   // By current pack/unpack logic we need to generate only a single phi node: if
7073   // a vector value for the predicated instruction exists at this point it means
7074   // the instruction has vector users only, and a phi for the vector value is
7075   // needed. In this case the recipe of the predicated instruction is marked to
7076   // also do that packing, thereby "hoisting" the insert-element sequence.
7077   // Otherwise, a phi node for the scalar value is needed.
7078   unsigned Part = State.Instance->Part;
7079   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7080     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7081     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7082     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7083     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7084     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7085     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7086   } else {
7087     Type *PredInstType = PredInst->getType();
7088     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7089     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7090     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7091     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7092   }
7093 }
7094 
7095 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7096   if (!User)
7097     return State.ILV->vectorizeMemoryInstruction(&Instr);
7098 
7099   // Last (and currently only) operand is a mask.
7100   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
7101   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
7102   for (unsigned Part = 0; Part < State.UF; ++Part)
7103     MaskValues[Part] = State.get(Mask, Part);
7104   State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
7105 }
7106 
7107 // Process the loop in the VPlan-native vectorization path. This path builds
7108 // VPlan upfront in the vectorization pipeline, which allows applying
7109 // VPlan-to-VPlan transformations from the very beginning without modifying the
7110 // input LLVM IR.
7111 static bool processLoopInVPlanNativePath(
7112     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7113     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7114     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7115     OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) {
7116 
7117   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7118   Function *F = L->getHeader()->getParent();
7119   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7120   LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7121                                 &Hints, IAI);
7122   // Use the planner for outer loop vectorization.
7123   // TODO: CM is not used at this point inside the planner. Turn CM into an
7124   // optional argument if we don't need it in the future.
7125   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);
7126 
7127   // Get user vectorization factor.
7128   unsigned UserVF = Hints.getWidth();
7129 
7130   // Check the function attributes to find out if this function should be
7131   // optimized for size.
7132   bool OptForSize =
7133       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7134 
7135   // Plan how to best vectorize, return the best VF and its cost.
7136   VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF);
7137 
7138   // If we are stress testing VPlan builds, do not attempt to generate vector
7139   // code. Masked vector code generation support will follow soon.
7140   if (VPlanBuildStressTest || EnableVPlanPredication)
7141     return false;
7142 
7143   LVP.setBestPlan(VF.Width, 1);
7144 
7145   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, UserVF, 1, LVL,
7146                          &CM);
7147   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7148                     << L->getHeader()->getParent()->getName() << "\"\n");
7149   LVP.executePlan(LB, DT);
7150 
7151   // Mark the loop as already vectorized to avoid vectorizing again.
7152   Hints.setAlreadyVectorized();
7153 
7154   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7155   return true;
7156 }
7157 
7158 bool LoopVectorizePass::processLoop(Loop *L) {
7159   assert((EnableVPlanNativePath || L->empty()) &&
7160          "VPlan-native path is not enabled. Only process inner loops.");
7161 
7162 #ifndef NDEBUG
7163   const std::string DebugLocStr = getDebugLocString(L);
7164 #endif /* NDEBUG */
7165 
7166   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7167                     << L->getHeader()->getParent()->getName() << "\" from "
7168                     << DebugLocStr << "\n");
7169 
7170   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7171 
7172   LLVM_DEBUG(
7173       dbgs() << "LV: Loop hints:"
7174              << " force="
7175              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7176                      ? "disabled"
7177                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7178                             ? "enabled"
7179                             : "?"))
7180              << " width=" << Hints.getWidth()
7181              << " unroll=" << Hints.getInterleave() << "\n");
7182 
7183   // Function containing the loop.
7184   Function *F = L->getHeader()->getParent();
7185 
7186   // Looking at the diagnostic output is the only way to determine if a loop
7187   // was vectorized (other than looking at the IR or machine code), so it
7188   // is important to generate an optimization remark for each loop. Most of
7189   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7190   // generated as OptimizationRemark and OptimizationRemarkMissed are less
7191   // verbose and report vectorized loops and unvectorized loops that may
7192   // benefit from vectorization, respectively.
7193 
7194   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7195     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7196     return false;
7197   }
7198 
7199   PredicatedScalarEvolution PSE(*SE, *L);
7200 
7201   // Check if it is legal to vectorize the loop.
7202   LoopVectorizationRequirements Requirements(*ORE);
7203   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
7204                                 &Requirements, &Hints, DB, AC);
7205   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7206     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7207     Hints.emitRemarkWithHints();
7208     return false;
7209   }
7210 
7211   // Check the function attributes to find out if this function should be
7212   // optimized for size.
7213   bool OptForSize =
7214       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7215 
7216   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7217   // here. They may require CFG and instruction level transformations before
7218   // even evaluating whether vectorization is profitable. Since we cannot modify
7219   // the incoming IR, we need to build VPlan upfront in the vectorization
7220   // pipeline.
7221   if (!L->empty())
7222     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7223                                         ORE, Hints);
7224 
7225   assert(L->empty() && "Inner loop expected.");
7226   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7227   // count by optimizing for size, to minimize overheads.
7228   // Prefer a constant trip count, then profile data, then the upper-bound estimate.
7229   unsigned ExpectedTC = 0;
7230   bool HasExpectedTC = false;
7231   if (const SCEVConstant *ConstExits =
7232       dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
7233     const APInt &ExitsCount = ConstExits->getAPInt();
7234     // We are interested in small values for ExpectedTC. Skip over those that
7235     // can't fit an unsigned.
7236     if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
7237       ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
7238       HasExpectedTC = true;
7239     }
7240   }
7241   // ExpectedTC may be large because it's bound by a variable. Check
7242   // profiling information to validate we should vectorize.
7243   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
7244     auto EstimatedTC = getLoopEstimatedTripCount(L);
7245     if (EstimatedTC) {
7246       ExpectedTC = *EstimatedTC;
7247       HasExpectedTC = true;
7248     }
7249   }
7250   if (!HasExpectedTC) {
7251     ExpectedTC = SE->getSmallConstantMaxTripCount(L);
7252     HasExpectedTC = (ExpectedTC > 0);
7253   }
7254 
7255   if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
7256     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7257                       << "This loop is worth vectorizing only if no scalar "
7258                       << "iteration overheads are incurred.");
7259     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7260       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7261     else {
7262       LLVM_DEBUG(dbgs() << "\n");
7263       // Loops with a very small trip count are considered for vectorization
7264       // under OptForSize, thereby making sure the cost of their loop body is
7265       // dominant, free of runtime guards and scalar iteration overheads.
7266       OptForSize = true;
7267     }
7268   }
7269 
7270   // Check the function attributes to see if implicit floats are allowed.
7271   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7272   // an integer loop and the vector instructions selected are purely integer
7273   // vector instructions?
7274   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7275     LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
7276                          "attribute is used.\n");
7277     ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
7278                                      "NoImplicitFloat", L)
7279               << "loop not vectorized due to NoImplicitFloat attribute");
7280     Hints.emitRemarkWithHints();
7281     return false;
7282   }
7283 
7284   // Check if the target supports potentially unsafe FP vectorization.
7285   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7286   // for the target we're vectorizing for, to make sure none of the
7287   // additional fp-math flags can help.
7288   if (Hints.isPotentiallyUnsafe() &&
7289       TTI->isFPVectorizationPotentiallyUnsafe()) {
7290     LLVM_DEBUG(
7291         dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
7292     ORE->emit(
7293         createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
7294         << "loop not vectorized due to unsafe FP support.");
7295     Hints.emitRemarkWithHints();
7296     return false;
7297   }
7298 
7299   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7300   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7301 
7302   // If an override option has been passed in for interleaved accesses, use it.
7303   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7304     UseInterleaved = EnableInterleavedMemAccesses;
7305 
7306   // Analyze interleaved memory accesses.
7307   if (UseInterleaved) {
7308     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7309   }
7310 
7311   // Use the cost model.
7312   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
7313                                 &Hints, IAI);
7314   CM.collectValuesToIgnore();
7315 
7316   // Use the planner for vectorization.
7317   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
7318 
7319   // Get user vectorization factor.
7320   unsigned UserVF = Hints.getWidth();
7321 
7322   // Plan how to best vectorize, return the best VF and its cost.
7323   VectorizationFactor VF = LVP.plan(OptForSize, UserVF);
7324 
7325   // Select the interleave count.
7326   unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
7327 
7328   // Get user interleave count.
7329   unsigned UserIC = Hints.getInterleave();
7330 
7331   // Identify the diagnostic messages that should be produced.
7332   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7333   bool VectorizeLoop = true, InterleaveLoop = true;
7334   if (Requirements.doesNotMeet(F, L, Hints)) {
7335     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7336                          "requirements.\n");
7337     Hints.emitRemarkWithHints();
7338     return false;
7339   }
7340 
7341   if (VF.Width == 1) {
7342     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7343     VecDiagMsg = std::make_pair(
7344         "VectorizationNotBeneficial",
7345         "the cost-model indicates that vectorization is not beneficial");
7346     VectorizeLoop = false;
7347   }
7348 
7349   if (IC == 1 && UserIC <= 1) {
7350     // Tell the user interleaving is not beneficial.
7351     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7352     IntDiagMsg = std::make_pair(
7353         "InterleavingNotBeneficial",
7354         "the cost-model indicates that interleaving is not beneficial");
7355     InterleaveLoop = false;
7356     if (UserIC == 1) {
7357       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7358       IntDiagMsg.second +=
7359           " and is explicitly disabled or interleave count is set to 1";
7360     }
7361   } else if (IC > 1 && UserIC == 1) {
7362     // Tell the user interleaving is beneficial, but it is explicitly disabled.
7363     LLVM_DEBUG(
7364         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.\n");
7365     IntDiagMsg = std::make_pair(
7366         "InterleavingBeneficialButDisabled",
7367         "the cost-model indicates that interleaving is beneficial "
7368         "but is explicitly disabled or interleave count is set to 1");
7369     InterleaveLoop = false;
7370   }
7371 
7372   // Override IC if user provided an interleave count.
7373   IC = UserIC > 0 ? UserIC : IC;
7374 
7375   // Emit diagnostic messages, if any.
7376   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7377   if (!VectorizeLoop && !InterleaveLoop) {
7378     // Do not vectorize or interleave the loop.
7379     ORE->emit([&]() {
7380       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7381                                       L->getStartLoc(), L->getHeader())
7382              << VecDiagMsg.second;
7383     });
7384     ORE->emit([&]() {
7385       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7386                                       L->getStartLoc(), L->getHeader())
7387              << IntDiagMsg.second;
7388     });
7389     return false;
7390   } else if (!VectorizeLoop && InterleaveLoop) {
7391     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7392     ORE->emit([&]() {
7393       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7394                                         L->getStartLoc(), L->getHeader())
7395              << VecDiagMsg.second;
7396     });
7397   } else if (VectorizeLoop && !InterleaveLoop) {
7398     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7399                       << ") in " << DebugLocStr << '\n');
7400     ORE->emit([&]() {
7401       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7402                                         L->getStartLoc(), L->getHeader())
7403              << IntDiagMsg.second;
7404     });
7405   } else if (VectorizeLoop && InterleaveLoop) {
7406     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7407                       << ") in " << DebugLocStr << '\n');
7408     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7409   }
7410 
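  // Tell the planner which VF and interleave count were chosen so that the
  // corresponding VPlan can be executed below.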
7411   LVP.setBestPlan(VF.Width, IC);
7412 
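  // Bring ore::NV into scope; it is used below to attach named values to the
  // optimization remarks.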
7413   using namespace ore;
7414   bool DisableRuntimeUnroll = false;
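  // Capture the original loop metadata before it is rewritten; it is consulted
  // below when forming follow-up metadata for the remainder loop.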
7415   MDNode *OrigLoopID = L->getLoopID();
7416 
7417   if (!VectorizeLoop) {
7418     assert(IC > 1 && "interleave count should not be 1 or 0");
7419     // If we decided that it is not profitable to vectorize the loop, then
7420     // interleave it.
7421     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7422                                &CM);
7423     LVP.executePlan(Unroller, DT);
7424 
7425     ORE->emit([&]() {
7426       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7427                                 L->getHeader())
7428              << "interleaved loop (interleaved count: "
7429              << NV("InterleaveCount", IC) << ")";
7430     });
7431   } else {
7432     // If we decided that it is *legal* to vectorize the loop, then do it.
7433     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7434                            &LVL, &CM);
7435     LVP.executePlan(LB, DT);
7436     ++LoopsVectorized;
7437 
7438     // Add metadata to disable runtime unrolling a scalar loop when there are
7439     // no runtime checks about strides and memory. A scalar loop that is
7440     // rarely used is not worth unrolling.
7441     if (!LB.areSafetyChecksAdded())
7442       DisableRuntimeUnroll = true;
7443 
7444     // Report the vectorization decision.
7445     ORE->emit([&]() {
7446       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7447                                 L->getHeader())
7448              << "vectorized loop (vectorization width: "
7449              << NV("VectorizationFactor", VF.Width)
7450              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
7451     });
7452   }
7453 
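  // Propagate any user-specified follow-up metadata to the remainder loop;
  // otherwise fall back to the default bookkeeping below.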
7454   Optional<MDNode *> RemainderLoopID =
7455       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7456                                       LLVMLoopVectorizeFollowupEpilogue});
7457   if (RemainderLoopID.hasValue()) {
7458     L->setLoopID(RemainderLoopID.getValue());
7459   } else {
7460     if (DisableRuntimeUnroll)
7461       AddRuntimeUnrollDisableMetaData(L);
7462 
7463     // Mark the loop as already vectorized to avoid vectorizing again.
7464     Hints.setAlreadyVectorized();
7465   }
7466 
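  // Under LLVM_DEBUG, verify the enclosing function to catch IR left in an
  // inconsistent state by the transformation.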
7467   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7468   return true;
7469 }
7470 
7471 bool LoopVectorizePass::runImpl(
7472     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7473     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7474     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7475     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7476     OptimizationRemarkEmitter &ORE_) {
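  // Cache the analysis results in members so that the per-loop processing
  // below can use them.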
7477   SE = &SE_;
7478   LI = &LI_;
7479   TTI = &TTI_;
7480   DT = &DT_;
7481   BFI = &BFI_;
7482   TLI = TLI_;
7483   AA = &AA_;
7484   AC = &AC_;
7485   GetLAA = &GetLAA_;
7486   DB = &DB_;
7487   ORE = &ORE_;
7488 
7489   // Don't attempt if
7490   // 1. the target claims to have no vector registers, and
7491   // 2. interleaving won't help ILP.
7492   //
7493   // The second condition is necessary because, even if the target has no
7494   // vector registers, loop vectorization may still enable scalar
7495   // interleaving.
7496   if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
7497     return false;
7498 
7499   bool Changed = false;
7500 
7501   // The vectorizer requires loops to be in simplified form.
7502   // Since simplification may add new inner loops, it has to run before the
7503   // legality and profitability checks. This means running the loop vectorizer
7504   // will simplify all loops, regardless of whether anything ends up being
7505   // vectorized.
7506   for (auto &L : *LI)
7507     Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
7508 
7509   // Build up a worklist of inner-loops to vectorize. This is necessary as
7510   // the act of vectorizing or partially unrolling a loop creates new loops
7511   // and can invalidate iterators across the loops.
7512   SmallVector<Loop *, 8> Worklist;
7513 
7514   for (Loop *L : *LI)
7515     collectSupportedLoops(*L, LI, ORE, Worklist);
7516 
7517   LoopsAnalyzed += Worklist.size();
7518 
7519   // Now walk the identified inner loops.
7520   while (!Worklist.empty()) {
7521     Loop *L = Worklist.pop_back_val();
7522 
7523     // For the inner loops we actually process, form LCSSA to simplify the
7524     // transform.
7525     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
7526 
7527     Changed |= processLoop(L);
7528   }
7529 
7530   // Process each loop nest in the function.
7531   return Changed;
7532 }
7533 
7534 PreservedAnalyses LoopVectorizePass::run(Function &F,
7535                                          FunctionAnalysisManager &AM) {
7536   auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
7537   auto &LI = AM.getResult<LoopAnalysis>(F);
7538   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
7539   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
7540   auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
7541   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
7542   auto &AA = AM.getResult<AAManager>(F);
7543   auto &AC = AM.getResult<AssumptionAnalysis>(F);
7544   auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
7545   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
7546 
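  // LoopAccessInfo is computed lazily, one loop at a time, via this callback
  // into the inner loop analysis manager.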
7547   auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
7548   std::function<const LoopAccessInfo &(Loop &)> GetLAA =
7549       [&](Loop &L) -> const LoopAccessInfo & {
7550     LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
7551     return LAM.getResult<LoopAccessAnalysis>(L, AR);
7552   };
7553   bool Changed =
7554       runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
7555   if (!Changed)
7556     return PreservedAnalyses::all();
7557   PreservedAnalyses PA;
7558 
7559   // We currently do not preserve loopinfo/dominator analyses with outer loop
7560   // vectorization. Until this is addressed, mark these analyses as preserved
7561   // only for the non-VPlan-native path.
7562   // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
7563   if (!EnableVPlanNativePath) {
7564     PA.preserve<LoopAnalysis>();
7565     PA.preserve<DominatorTreeAnalysis>();
7566   }
7567   PA.preserve<BasicAA>();
7568   PA.preserve<GlobalsAA>();
7569   return PA;
7570 }
7571