1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
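//
// For example (an illustrative C-like sketch, assuming VF == 4), the scalar
// loop
//   for (i = 0; i < n; i += 1)
//     a[i] = b[i] + k;
// is widened so that each iteration processes four elements at once:
//   for (i = 0; i < n; i += 4)
//     a[i:i+3] = b[i:i+3] + k; // a single SIMD load/add/store sequence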
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpander.h"
95 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
96 #include "llvm/Analysis/TargetLibraryInfo.h"
97 #include "llvm/Analysis/TargetTransformInfo.h"
98 #include "llvm/Analysis/VectorUtils.h"
99 #include "llvm/IR/Attributes.h"
100 #include "llvm/IR/BasicBlock.h"
101 #include "llvm/IR/CFG.h"
102 #include "llvm/IR/Constant.h"
103 #include "llvm/IR/Constants.h"
104 #include "llvm/IR/DataLayout.h"
105 #include "llvm/IR/DebugInfoMetadata.h"
106 #include "llvm/IR/DebugLoc.h"
107 #include "llvm/IR/DerivedTypes.h"
108 #include "llvm/IR/DiagnosticInfo.h"
109 #include "llvm/IR/Dominators.h"
110 #include "llvm/IR/Function.h"
111 #include "llvm/IR/IRBuilder.h"
112 #include "llvm/IR/InstrTypes.h"
113 #include "llvm/IR/Instruction.h"
114 #include "llvm/IR/Instructions.h"
115 #include "llvm/IR/IntrinsicInst.h"
116 #include "llvm/IR/Intrinsics.h"
117 #include "llvm/IR/LLVMContext.h"
118 #include "llvm/IR/Metadata.h"
119 #include "llvm/IR/Module.h"
120 #include "llvm/IR/Operator.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
// Indicates that an epilogue is undesired and that predication is preferred.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191     cl::desc("Maximize bandwidth when selecting vectorization factor which "
192              "will be determined by the smallest type in loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
206     cl::desc("We don't interleave loops with a estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261     cl::desc("Enable if predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the Loop vectorization passes"));
297 
/// A helper function that returns the type of a loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301          "Expected Load or Store instruction");
302   if (auto *LI = dyn_cast<LoadInst>(I))
303     return LI->getType();
304   return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306 
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
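///
/// For example (illustrative, assuming a typical data layout), i1 is
/// irregular: at VF == 1 its allocated size (8 bits) differs from its type
/// size (1 bit), and at VF == 8 the allocated size of 8 separate i1 values
/// (8 bytes) differs from the store size of <8 x i1> (1 byte).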
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311   // Determine if an array of VF elements of type Ty is "bitcast compatible"
312   // with a <VF x Ty> vector.
313   if (VF > 1) {
314     auto *VectorTy = VectorType::get(Ty, VF);
315     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316   }
317 
318   // If the vectorization factor is one, we just check if an array of type Ty
319   // requires padding between elements.
320   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322 
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 ///       we always assume predicated blocks have a 50% chance of executing.
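///
/// For example, with the current return value of 2, callers assume a
/// predicated block executes once every 2 iterations of the loop header and
/// scale the block's cost down accordingly.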
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330 
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333   if (isa<FPMathOperator>(V))
334     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335   return V;
336 }
337 
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339   if (isa<FPMathOperator>(V))
340     cast<Instruction>(V)->setFastMathFlags(FMF);
341   return V;
342 }
343 
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348                            : ConstantFP::get(Ty, C);
349 }
350 
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 ///   1) Returns exact trip count if it is known.
354 ///   2) Returns expected trip count according to profile data if any.
355 ///   3) Returns upper bound estimate if it is known.
356 ///   4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358   // Check if exact trip count is known.
359   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360     return ExpectedTC;
361 
362   // Check if there is an expected trip count available from profile data.
363   if (LoopVectorizeWithBlockFrequency)
364     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365       return EstimatedTC;
366 
367   // Check if upper bound estimate is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369     return ExpectedTC;
370 
371   return None;
372 }
373 
374 namespace llvm {
375 
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
378 /// This class performs the widening of scalars into vectors, or multiple
379 /// scalars. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 ///   counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 ///   instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables found for a given vectorization factor.
390 class InnerLoopVectorizer {
391 public:
392   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393                       LoopInfo *LI, DominatorTree *DT,
394                       const TargetLibraryInfo *TLI,
395                       const TargetTransformInfo *TTI, AssumptionCache *AC,
396                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398                       LoopVectorizationCostModel *CM)
399       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
400         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
401         Builder(PSE.getSE()->getContext()),
402         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
403   virtual ~InnerLoopVectorizer() = default;
404 
405   /// Create a new empty loop. Unlink the old loop and connect the new one.
406   /// Return the pre-header block of the new loop.
407   BasicBlock *createVectorizedLoopSkeleton();
408 
409   /// Widen a single instruction within the innermost loop.
410   void widenInstruction(Instruction &I, VPUser &Operands,
411                         VPTransformState &State);
412 
413   /// Widen a single call instruction within the innermost loop.
414   void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
415                             VPTransformState &State);
416 
417   /// Widen a single select instruction within the innermost loop.
418   void widenSelectInstruction(SelectInst &I, bool InvariantCond);
419 
  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
421   void fixVectorizedLoop();
422 
423   // Return true if any runtime check is added.
424   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
425 
426   /// A type for vectorized values in the new loop. Each value from the
427   /// original loop, when vectorized, is represented by UF vector values in the
428   /// new unrolled loop, where UF is the unroll factor.
429   using VectorParts = SmallVector<Value *, 2>;
430 
431   /// Vectorize a single GetElementPtrInst based on information gathered and
432   /// decisions taken during planning.
433   void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
434                 bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);
435 
436   /// Vectorize a single PHINode in a block. This method handles the induction
437   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
438   /// arbitrary length vectors.
439   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
440 
441   /// A helper function to scalarize a single Instruction in the innermost loop.
442   /// Generates a sequence of scalar instances for each lane between \p MinLane
443   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
445   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
446                             bool IfPredicateInstr);
447 
448   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
449   /// is provided, the integer induction variable will first be truncated to
450   /// the corresponding type.
451   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
452 
453   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
454   /// vector or scalar value on-demand if one is not yet available. When
455   /// vectorizing a loop, we visit the definition of an instruction before its
456   /// uses. When visiting the definition, we either vectorize or scalarize the
457   /// instruction, creating an entry for it in the corresponding map. (In some
458   /// cases, such as induction variables, we will create both vector and scalar
459   /// entries.) Then, as we encounter uses of the definition, we derive values
460   /// for each scalar or vector use unless such a value is already available.
461   /// For example, if we scalarize a definition and one of its uses is vector,
462   /// we build the required vector on-demand with an insertelement sequence
463   /// when visiting the use. Otherwise, if the use is scalar, we can use the
464   /// existing scalar definition.
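  ///
  /// For example (illustrative), with VF == 4, a scalarized definition that
  /// has a vector use is packed on demand with a sequence of four
  /// insertelement instructions, one per lane.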
465   ///
466   /// Return a value in the new loop corresponding to \p V from the original
467   /// loop at unroll index \p Part. If the value has already been vectorized,
468   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
469   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
470   /// a new vector value on-demand by inserting the scalar values into a vector
471   /// with an insertelement sequence. If the value has been neither vectorized
472   /// nor scalarized, it must be loop invariant, so we simply broadcast the
473   /// value into a vector.
474   Value *getOrCreateVectorValue(Value *V, unsigned Part);
475 
476   /// Return a value in the new loop corresponding to \p V from the original
477   /// loop at unroll and vector indices \p Instance. If the value has been
478   /// vectorized but not scalarized, the necessary extractelement instruction
479   /// will be generated.
480   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
481 
482   /// Construct the vector value of a scalarized value \p V one lane at a time.
483   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
484 
485   /// Try to vectorize interleaved access group \p Group with the base address
486   /// given in \p Addr, optionally masking the vector operations if \p
487   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
488   /// values in the vectorized loop.
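  ///
  /// For example (an illustrative sketch), the two loads below form an
  /// interleave group with factor 2:
  ///   for (i = 0; i < N; ++i) {
  ///     R = Pic[2 * i];     // Member of index 0.
  ///     G = Pic[2 * i + 1]; // Member of index 1.
  ///   }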
489   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
490                                 VPTransformState &State, VPValue *Addr,
491                                 VPValue *BlockInMask = nullptr);
492 
493   /// Vectorize Load and Store instructions with the base address given in \p
494   /// Addr, optionally masking the vector operations if \p BlockInMask is
495   /// non-null. Use \p State to translate given VPValues to IR values in the
496   /// vectorized loop.
497   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
498                                   VPValue *Addr, VPValue *StoredValue,
499                                   VPValue *BlockInMask);
500 
501   /// Set the debug location in the builder using the debug location in
502   /// the instruction.
503   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
504 
505   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs();
507 
508 protected:
509   friend class LoopVectorizationPlanner;
510 
511   /// A small list of PHINodes.
512   using PhiVector = SmallVector<PHINode *, 4>;
513 
514   /// A type for scalarized values in the new loop. Each value from the
515   /// original loop, when scalarized, is represented by UF x VF scalar values
516   /// in the new unrolled loop, where UF is the unroll factor and VF is the
517   /// vectorization factor.
518   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
519 
520   /// Set up the values of the IVs correctly when exiting the vector loop.
521   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
522                     Value *CountRoundDown, Value *EndValue,
523                     BasicBlock *MiddleBlock);
524 
525   /// Create a new induction variable inside L.
526   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
527                                    Value *Step, Instruction *DL);
528 
529   /// Handle all cross-iteration phis in the header.
530   void fixCrossIterationPHIs();
531 
532   /// Fix a first-order recurrence. This is the second phase of vectorizing
533   /// this phi node.
534   void fixFirstOrderRecurrence(PHINode *Phi);
535 
536   /// Fix a reduction cross-iteration phi. This is the second phase of
537   /// vectorizing this phi node.
538   void fixReduction(PHINode *Phi);
539 
540   /// Clear NSW/NUW flags from reduction instructions if necessary.
541   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
542 
  /// The loop exit block may have single-value PHI nodes with some incoming
  /// value. While vectorizing we only handle real values that were defined
  /// inside the loop, and each such PHI should have one incoming value per
  /// predecessor of its parent basic block. See PR14725.
547   void fixLCSSAPHIs();
548 
549   /// Iteratively sink the scalarized operands of a predicated instruction into
550   /// the block that was created for it.
551   void sinkScalarOperands(Instruction *PredInst);
552 
553   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
554   /// represented as.
555   void truncateToMinimalBitwidths();
556 
557   /// Create a broadcast instruction. This method generates a broadcast
558   /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable, then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
562   virtual Value *getBroadcastInstrs(Value *V);
563 
564   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
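  /// For example (illustrative), for an integer Val with StartIdx == 0,
  /// Step == 1 and VF == 4, the result is Val + <0, 1, 2, 3>.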
567   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
568                                Instruction::BinaryOps Opcode =
569                                Instruction::BinaryOpsEnd);
570 
571   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
572   /// variable on which to base the steps, \p Step is the size of the step, and
573   /// \p EntryVal is the value from the original loop that maps to the steps.
574   /// Note that \p EntryVal doesn't have to be an induction variable - it
575   /// can also be a truncate instruction.
576   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
577                         const InductionDescriptor &ID);
578 
579   /// Create a vector induction phi node based on an existing scalar one. \p
580   /// EntryVal is the value from the original loop that maps to the vector phi
581   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
582   /// truncate instruction, instead of widening the original IV, we widen a
583   /// version of the IV truncated to \p EntryVal's type.
584   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
585                                        Value *Step, Instruction *EntryVal);
586 
587   /// Returns true if an instruction \p I should be scalarized instead of
588   /// vectorized for the chosen vectorization factor.
589   bool shouldScalarizeInstruction(Instruction *I) const;
590 
591   /// Returns true if we should generate a scalar version of \p IV.
592   bool needsScalarInduction(Instruction *IV) const;
593 
594   /// If there is a cast involved in the induction variable \p ID, which should
595   /// be ignored in the vectorized loop body, this function records the
596   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proved that the casted Phi is equal to the uncasted
598   /// Phi in the vectorized loop (under a runtime guard), and therefore
599   /// there is no need to vectorize the cast - the same value can be used in the
600   /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
603   ///
604   /// \p EntryVal is the value from the original loop that maps to the vector
605   /// phi node and is used to distinguish what is the IV currently being
606   /// processed - original one (if \p EntryVal is a phi corresponding to the
607   /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
609   /// latter case \p EntryVal is a TruncInst and we must not record anything for
610   /// that IV, but it's error-prone to expect callers of this routine to care
611   /// about that, hence this explicit parameter.
612   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
613                                              const Instruction *EntryVal,
614                                              Value *VectorLoopValue,
615                                              unsigned Part,
616                                              unsigned Lane = UINT_MAX);
617 
618   /// Generate a shuffle sequence that will reverse the vector Vec.
619   virtual Value *reverseVector(Value *Vec);
620 
621   /// Returns (and creates if needed) the original loop trip count.
622   Value *getOrCreateTripCount(Loop *NewLoop);
623 
624   /// Returns (and creates if needed) the trip count of the widened loop.
625   Value *getOrCreateVectorTripCount(Loop *NewLoop);
626 
627   /// Returns a bitcasted value to the requested vector type.
628   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
629   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
630                                 const DataLayout &DL);
631 
632   /// Emit a bypass check to see if the vector trip count is zero, including if
633   /// it overflows.
634   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
635 
636   /// Emit a bypass check to see if all of the SCEV assumptions we've
637   /// had to make are correct.
638   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
639 
640   /// Emit bypass checks to check any memory assumptions we may have made.
641   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
642 
643   /// Compute the transformed value of Index at offset StartValue using step
644   /// StepValue.
645   /// For integer induction, returns StartValue + Index * StepValue.
646   /// For pointer induction, returns StartValue[Index * StepValue].
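  /// For example, an integer induction with StartValue 10 and StepValue 3
  /// maps Index 4 to 10 + 4 * 3 == 22.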
647   /// FIXME: The newly created binary instructions should contain nsw/nuw
648   /// flags, which can be found from the original scalar operations.
649   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
650                               const DataLayout &DL,
651                               const InductionDescriptor &ID) const;
652 
653   /// Add additional metadata to \p To that was not present on \p Orig.
654   ///
655   /// Currently this is used to add the noalias annotations based on the
656   /// inserted memchecks.  Use this for instructions that are *cloned* into the
657   /// vector loop.
658   void addNewMetadata(Instruction *To, const Instruction *Orig);
659 
660   /// Add metadata from one instruction to another.
661   ///
662   /// This includes both the original MDs from \p From and additional ones (\see
663   /// addNewMetadata).  Use this for *newly created* instructions in the vector
664   /// loop.
665   void addMetadata(Instruction *To, Instruction *From);
666 
667   /// Similar to the previous function but it adds the metadata to a
668   /// vector of instructions.
669   void addMetadata(ArrayRef<Value *> To, Instruction *From);
670 
671   /// The original loop.
672   Loop *OrigLoop;
673 
674   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
675   /// dynamic knowledge to simplify SCEV expressions and converts them to a
676   /// more usable form.
677   PredicatedScalarEvolution &PSE;
678 
679   /// Loop Info.
680   LoopInfo *LI;
681 
682   /// Dominator Tree.
683   DominatorTree *DT;
684 
685   /// Alias Analysis.
686   AliasAnalysis *AA;
687 
688   /// Target Library Info.
689   const TargetLibraryInfo *TLI;
690 
691   /// Target Transform Info.
692   const TargetTransformInfo *TTI;
693 
694   /// Assumption Cache.
695   AssumptionCache *AC;
696 
697   /// Interface to emit optimization remarks.
698   OptimizationRemarkEmitter *ORE;
699 
700   /// LoopVersioning.  It's only set up (non-null) if memchecks were
701   /// used.
702   ///
703   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
705   std::unique_ptr<LoopVersioning> LVer;
706 
707   /// The vectorization SIMD factor to use. Each vector will have this many
708   /// vector elements.
709   unsigned VF;
710 
711   /// The vectorization unroll factor to use. Each scalar is vectorized to this
712   /// many different vector instructions.
713   unsigned UF;
714 
715   /// The builder that we use
716   IRBuilder<> Builder;
717 
718   // --- Vectorization state ---
719 
720   /// The vector-loop preheader.
721   BasicBlock *LoopVectorPreHeader;
722 
723   /// The scalar-loop preheader.
724   BasicBlock *LoopScalarPreHeader;
725 
726   /// Middle Block between the vector and the scalar.
727   BasicBlock *LoopMiddleBlock;
728 
729   /// The ExitBlock of the scalar loop.
730   BasicBlock *LoopExitBlock;
731 
732   /// The vector loop body.
733   BasicBlock *LoopVectorBody;
734 
735   /// The scalar loop body.
736   BasicBlock *LoopScalarBody;
737 
738   /// A list of all bypass blocks. The first block is the entry of the loop.
739   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
740 
741   /// The new Induction variable which was added to the new block.
742   PHINode *Induction = nullptr;
743 
744   /// The induction variable of the old basic block.
745   PHINode *OldInduction = nullptr;
746 
747   /// Maps values from the original loop to their corresponding values in the
748   /// vectorized loop. A key value can map to either vector values, scalar
749   /// values or both kinds of values, depending on whether the key was
750   /// vectorized and scalarized.
751   VectorizerValueMap VectorLoopValueMap;
752 
753   /// Store instructions that were predicated.
754   SmallVector<Instruction *, 4> PredicatedInstructions;
755 
756   /// Trip count of the original loop.
757   Value *TripCount = nullptr;
758 
759   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
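  /// (e.g., a TripCount of 103 with VF == 4 and UF == 2 yields 96).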
760   Value *VectorTripCount = nullptr;
761 
762   /// The legality analysis.
763   LoopVectorizationLegality *Legal;
764 
  /// The profitability analysis.
766   LoopVectorizationCostModel *Cost;
767 
768   // Record whether runtime checks are added.
769   bool AddedSafetyChecks = false;
770 
771   // Holds the end values for each induction variable. We save the end values
772   // so we can later fix-up the external users of the induction variables.
773   DenseMap<PHINode *, Value *> IVEndValues;
774 
775   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
776   // fixed up at the end of vector code generation.
777   SmallVector<PHINode *, 8> OrigPHIsToFix;
778 };
779 
780 class InnerLoopUnroller : public InnerLoopVectorizer {
781 public:
782   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
783                     LoopInfo *LI, DominatorTree *DT,
784                     const TargetLibraryInfo *TLI,
785                     const TargetTransformInfo *TTI, AssumptionCache *AC,
786                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
787                     LoopVectorizationLegality *LVL,
788                     LoopVectorizationCostModel *CM)
789       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
790                             UnrollFactor, LVL, CM) {}
791 
792 private:
793   Value *getBroadcastInstrs(Value *V) override;
794   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
795                        Instruction::BinaryOps Opcode =
796                        Instruction::BinaryOpsEnd) override;
797   Value *reverseVector(Value *Vec) override;
798 };
799 
800 } // end namespace llvm
801 
/// Look for a meaningful debug location on the instruction or its
803 /// operands.
804 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
805   if (!I)
806     return I;
807 
808   DebugLoc Empty;
809   if (I->getDebugLoc() != Empty)
810     return I;
811 
  for (Value *Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }
817 
818   return I;
819 }
820 
821 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
822   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
823     const DILocation *DIL = Inst->getDebugLoc();
824     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
825         !isa<DbgInfoIntrinsic>(Inst)) {
826       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
827       if (NewDIL)
828         B.SetCurrentDebugLocation(NewDIL.getValue());
829       else
830         LLVM_DEBUG(dbgs()
831                    << "Failed to create new discriminator: "
832                    << DIL->getFilename() << " Line: " << DIL->getLine());
833     }
834     else
835       B.SetCurrentDebugLocation(DIL);
836   } else
837     B.SetCurrentDebugLocation(DebugLoc());
838 }
839 
840 /// Write a record \p DebugMsg about vectorization failure to the debug
841 /// output stream. If \p I is passed, it is an instruction that prevents
842 /// vectorization.
843 #ifndef NDEBUG
844 static void debugVectorizationFailure(const StringRef DebugMsg,
845     Instruction *I) {
846   dbgs() << "LV: Not vectorizing: " << DebugMsg;
847   if (I != nullptr)
848     dbgs() << " " << *I;
849   else
850     dbgs() << '.';
851   dbgs() << '\n';
852 }
853 #endif
854 
855 /// Create an analysis remark that explains why vectorization failed
856 ///
857 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
858 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
859 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
860 /// the location of the remark.  \return the remark object that can be
861 /// streamed to.
862 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
863     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
864   Value *CodeRegion = TheLoop->getHeader();
865   DebugLoc DL = TheLoop->getStartLoc();
866 
867   if (I) {
868     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
871     if (I->getDebugLoc())
872       DL = I->getDebugLoc();
873   }
874 
875   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
876   R << "loop not vectorized: ";
877   return R;
878 }
879 
880 namespace llvm {
881 
882 void reportVectorizationFailure(const StringRef DebugMsg,
883     const StringRef OREMsg, const StringRef ORETag,
884     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
885   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
886   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
887   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
888                 ORETag, TheLoop, I) << OREMsg);
889 }
890 
891 } // end namespace llvm
892 
893 #ifndef NDEBUG
894 /// \return string containing a file name and a line # for the given loop.
895 static std::string getDebugLocString(const Loop *L) {
896   std::string Result;
897   if (L) {
898     raw_string_ostream OS(Result);
899     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
900       LoopDbgLoc.print(OS);
901     else
902       // Just print the module name.
903       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
904     OS.flush();
905   }
906   return Result;
907 }
908 #endif
909 
910 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
911                                          const Instruction *Orig) {
912   // If the loop was versioned with memchecks, add the corresponding no-alias
913   // metadata.
914   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
915     LVer->annotateInstWithNoAlias(To, Orig);
916 }
917 
918 void InnerLoopVectorizer::addMetadata(Instruction *To,
919                                       Instruction *From) {
920   propagateMetadata(To, From);
921   addNewMetadata(To, From);
922 }
923 
924 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
925                                       Instruction *From) {
926   for (Value *V : To) {
927     if (Instruction *I = dyn_cast<Instruction>(V))
928       addMetadata(I, From);
929   }
930 }
931 
932 namespace llvm {
933 
// Hints for the loop vectorization cost model about how the scalar epilogue
// loop should be lowered.
936 enum ScalarEpilogueLowering {
937 
938   // The default: allowing scalar epilogues.
939   CM_ScalarEpilogueAllowed,
940 
941   // Vectorization with OptForSize: don't allow epilogues.
942   CM_ScalarEpilogueNotAllowedOptSize,
943 
  // A special case of vectorization with OptForSize: loops with a very small
945   // trip count are considered for vectorization under OptForSize, thereby
946   // making sure the cost of their loop body is dominant, free of runtime
947   // guards and scalar iteration overheads.
948   CM_ScalarEpilogueNotAllowedLowTripLoop,
949 
950   // Loop hint predicate indicating an epilogue is undesired.
951   CM_ScalarEpilogueNotNeededUsePredicate
952 };
953 
954 /// LoopVectorizationCostModel - estimates the expected speedups due to
955 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
958 /// expected speedup/slowdowns due to the supported instruction set. We use the
959 /// TargetTransformInfo to query the different backends for the cost of
960 /// different operations.
961 class LoopVectorizationCostModel {
962 public:
963   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
964                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
965                              LoopVectorizationLegality *Legal,
966                              const TargetTransformInfo &TTI,
967                              const TargetLibraryInfo *TLI, DemandedBits *DB,
968                              AssumptionCache *AC,
969                              OptimizationRemarkEmitter *ORE, const Function *F,
970                              const LoopVectorizeHints *Hints,
971                              InterleavedAccessInfo &IAI)
972       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
973         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
974         Hints(Hints), InterleaveInfo(IAI) {}
975 
976   /// \return An upper bound for the vectorization factor, or None if
977   /// vectorization and interleaving should be avoided up front.
978   Optional<unsigned> computeMaxVF();
979 
980   /// \return True if runtime checks are required for vectorization, and false
981   /// otherwise.
982   bool runtimeChecksRequired();
983 
984   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is nonzero,
  /// then that vectorization factor will be selected if vectorization is
987   /// possible.
988   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
989 
990   /// Setup cost-based decisions for user vectorization factor.
991   void selectUserVectorizationFactor(unsigned UserVF) {
992     collectUniformsAndScalars(UserVF);
993     collectInstsToScalarize(UserVF);
994   }
995 
996   /// \return The size (in bits) of the smallest and widest types in the code
997   /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64-bit loop indices.
999   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1000 
1001   /// \return The desired interleave count.
1002   /// If interleave count has been specified by metadata it will be returned.
1003   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1004   /// are the selected vectorization factor and the cost of the selected VF.
1005   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1006 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
1009   /// This function takes cost-based decisions for Load/Store instructions
1010   /// and collects them in a map. This decisions map is used for building
1011   /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
1013   /// avoid redundant calculations.
1014   void setCostBasedWideningDecision(unsigned VF);
1015 
1016   /// A struct that represents some properties of the register usage
1017   /// of a loop.
1018   struct RegisterUsage {
1019     /// Holds the number of loop invariant values that are used in the loop.
1020     /// The key is ClassID of target-provided register class.
1021     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1022     /// Holds the maximum number of concurrent live intervals in the loop.
1023     /// The key is ClassID of target-provided register class.
1024     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1025   };
1026 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1029   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1030 
1031   /// Collect values we want to ignore in the cost model.
1032   void collectValuesToIgnore();
1033 
1034   /// \returns The smallest bitwidth each instruction can be represented with.
1035   /// The vector equivalents of these instructions should be truncated to this
1036   /// type.
1037   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1038     return MinBWs;
1039   }
1040 
1041   /// \returns True if it is more profitable to scalarize instruction \p I for
1042   /// vectorization factor \p VF.
1043   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1044     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1045 
1046     // Cost model is not run in the VPlan-native path - return conservative
1047     // result until this changes.
1048     if (EnableVPlanNativePath)
1049       return false;
1050 
1051     auto Scalars = InstsToScalarize.find(VF);
1052     assert(Scalars != InstsToScalarize.end() &&
1053            "VF not yet analyzed for scalarization profitability");
1054     return Scalars->second.find(I) != Scalars->second.end();
1055   }
1056 
1057   /// Returns true if \p I is known to be uniform after vectorization.
1058   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1059     if (VF == 1)
1060       return true;
1061 
1062     // Cost model is not run in the VPlan-native path - return conservative
1063     // result until this changes.
1064     if (EnableVPlanNativePath)
1065       return false;
1066 
1067     auto UniformsPerVF = Uniforms.find(VF);
1068     assert(UniformsPerVF != Uniforms.end() &&
1069            "VF not yet analyzed for uniformity");
1070     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
1071   }
1072 
1073   /// Returns true if \p I is known to be scalar after vectorization.
1074   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1075     if (VF == 1)
1076       return true;
1077 
1078     // Cost model is not run in the VPlan-native path - return conservative
1079     // result until this changes.
1080     if (EnableVPlanNativePath)
1081       return false;
1082 
1083     auto ScalarsPerVF = Scalars.find(VF);
1084     assert(ScalarsPerVF != Scalars.end() &&
1085            "Scalar values are not calculated for VF");
1086     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
1087   }
1088 
1089   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1090   /// for vectorization factor \p VF.
1091   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1092     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1093            !isProfitableToScalarize(I, VF) &&
1094            !isScalarAfterVectorization(I, VF);
1095   }
1096 
1097   /// Decision that was taken during cost calculation for memory instruction.
1098   enum InstWidening {
1099     CM_Unknown,
1100     CM_Widen,         // For consecutive accesses with stride +1.
1101     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1102     CM_Interleave,
1103     CM_GatherScatter,
1104     CM_Scalarize
1105   };
1106 
1107   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1108   /// instruction \p I and vector width \p VF.
1109   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1110                            unsigned Cost) {
1111     assert(VF >= 2 && "Expected VF >=2");
1112     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1113   }
1114 
1115   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1116   /// interleaving group \p Grp and vector width \p VF.
1117   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1118                            InstWidening W, unsigned Cost) {
1119     assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
1121     /// But the cost will be assigned to one instruction only.
1122     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1123       if (auto *I = Grp->getMember(i)) {
1124         if (Grp->getInsertPos() == I)
1125           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1126         else
1127           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1128       }
1129     }
1130   }
1131 
1132   /// Return the cost model decision for the given instruction \p I and vector
1133   /// width \p VF. Return CM_Unknown if this instruction did not pass
1134   /// through the cost modeling.
1135   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1136     assert(VF >= 2 && "Expected VF >=2");
1137 
1138     // Cost model is not run in the VPlan-native path - return conservative
1139     // result until this changes.
1140     if (EnableVPlanNativePath)
1141       return CM_GatherScatter;
1142 
1143     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1144     auto Itr = WideningDecisions.find(InstOnVF);
1145     if (Itr == WideningDecisions.end())
1146       return CM_Unknown;
1147     return Itr->second.first;
1148   }
1149 
1150   /// Return the vectorization cost for the given instruction \p I and vector
1151   /// width \p VF.
1152   unsigned getWideningCost(Instruction *I, unsigned VF) {
1153     assert(VF >= 2 && "Expected VF >=2");
1154     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1155     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1156            "The cost is not calculated");
1157     return WideningDecisions[InstOnVF].second;
1158   }
1159 
1160   /// Return True if instruction \p I is an optimizable truncate whose operand
1161   /// is an induction variable. Such a truncate will be removed by adding a new
1162   /// induction variable with the destination type.
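  /// For example (illustrative IR):
  ///   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
  ///   %t  = trunc i64 %iv to i32
  /// Here %t can instead be computed by a new i32 induction variable,
  /// removing the truncate from the loop.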
1163   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1164     // If the instruction is not a truncate, return false.
1165     auto *Trunc = dyn_cast<TruncInst>(I);
1166     if (!Trunc)
1167       return false;
1168 
1169     // Get the source and destination types of the truncate.
1170     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1171     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1172 
1173     // If the truncate is free for the given types, return false. Replacing a
1174     // free truncate with an induction variable would add an induction variable
1175     // update instruction to each iteration of the loop. We exclude from this
1176     // check the primary induction variable since it will need an update
1177     // instruction regardless.
1178     Value *Op = Trunc->getOperand(0);
1179     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1180       return false;
1181 
1182     // If the truncated value is not an induction variable, return false.
1183     return Legal->isInductionPhi(Op);
1184   }
1185 
1186   /// Collects the instructions to scalarize for each predicated instruction in
1187   /// the loop.
1188   void collectInstsToScalarize(unsigned VF);
1189 
1190   /// Collect Uniform and Scalar values for the given \p VF.
1191   /// The sets depend on CM decision for Load/Store instructions
1192   /// that may be vectorized as interleave, gather-scatter or scalarized.
1193   void collectUniformsAndScalars(unsigned VF) {
1194     // Do the analysis once.
1195     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1196       return;
1197     setCostBasedWideningDecision(VF);
1198     collectLoopUniforms(VF);
1199     collectLoopScalars(VF);
1200   }
1201 
1202   /// Returns true if the target machine supports masked store operation
1203   /// for the given \p DataType and kind of access to \p Ptr.
1204   bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1205     return Legal->isConsecutivePtr(Ptr) &&
1206            TTI.isLegalMaskedStore(DataType, Alignment);
1207   }
1208 
1209   /// Returns true if the target machine supports masked load operation
1210   /// for the given \p DataType and kind of access to \p Ptr.
1211   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1212     return Legal->isConsecutivePtr(Ptr) &&
1213            TTI.isLegalMaskedLoad(DataType, Alignment);
1214   }
1215 
1216   /// Returns true if the target machine supports masked scatter operation
1217   /// for the given \p DataType.
1218   bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
1219     return TTI.isLegalMaskedScatter(DataType, Alignment);
1220   }
1221 
1222   /// Returns true if the target machine supports masked gather operation
1223   /// for the given \p DataType.
1224   bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
1225     return TTI.isLegalMaskedGather(DataType, Alignment);
1226   }
1227 
1228   /// Returns true if the target machine can represent \p V as a masked gather
1229   /// or scatter operation.
1230   bool isLegalGatherOrScatter(Value *V) {
1231     bool LI = isa<LoadInst>(V);
1232     bool SI = isa<StoreInst>(V);
1233     if (!LI && !SI)
1234       return false;
1235     auto *Ty = getMemInstValueType(V);
1236     Align Align = getLoadStoreAlignment(V);
1237     return (LI && isLegalMaskedGather(Ty, Align)) ||
1238            (SI && isLegalMaskedScatter(Ty, Align));
1239   }
1240 
1241   /// Returns true if \p I is an instruction that will be scalarized with
1242   /// predication. Such instructions include conditional stores and
1243   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1246   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1247 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked
  /// gather/scatter. This is a superset of the instructions for which
  /// isScalarWithPredication returns true.
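  /// For example, a udiv in a conditionally executed block may divide by zero
  /// in lanes where the original loop would not have executed it, so it must
  /// be scalarized with predication; a load or store in such a block may
  /// instead be widened to a masked load/store when the target supports one.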
1251   bool isPredicatedInst(Instruction *I) {
1252     if (!blockNeedsPredication(I->getParent()))
1253       return false;
1254     // Loads and stores that need some form of masked operation are predicated
1255     // instructions.
1256     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1257       return Legal->isMaskRequired(I);
1258     return isScalarWithPredication(I);
1259   }
1260 
1261   /// Returns true if \p I is a memory instruction with consecutive memory
1262   /// access that can be widened.
1263   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1264 
1265   /// Returns true if \p I is a memory instruction in an interleaved-group
1266   /// of memory accesses that can be vectorized with wide vector loads/stores
1267   /// and shuffles.
1268   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1269 
1270   /// Check if \p Instr belongs to any interleaved access group.
1271   bool isAccessInterleaved(Instruction *Instr) {
1272     return InterleaveInfo.isInterleaved(Instr);
1273   }
1274 
1275   /// Get the interleaved access group that \p Instr belongs to.
1276   const InterleaveGroup<Instruction> *
1277   getInterleavedAccessGroup(Instruction *Instr) {
1278     return InterleaveInfo.getInterleaveGroup(Instr);
1279   }
1280 
1281   /// Returns true if an interleaved group requires a scalar iteration
1282   /// to handle accesses with gaps, and there is nothing preventing us from
1283   /// creating a scalar epilogue.
1284   bool requiresScalarEpilogue() const {
1285     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1286   }
1287 
  /// Returns true if a scalar epilogue is allowed; it may be disallowed, for
  /// instance, due to optsize or a loop hint annotation.
1290   bool isScalarEpilogueAllowed() const {
1291     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1292   }
1293 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1295   bool foldTailByMasking() const { return FoldTailByMasking; }
1296 
  /// Returns true if the instructions in \p BB will have to be predicated,
  /// either because \p BB was conditionally executed in the original scalar
  /// loop or because the tail is folded by masking.
  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }
1300 
1301   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1302   /// with factor VF.  Return the cost of the instruction, including
1303   /// scalarization overhead if it's needed.
1304   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1305 
1306   /// Estimate cost of a call instruction CI if it were vectorized with factor
1307   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e., either a vector version isn't available or it is too
  /// expensive.
1311   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1312 
1313   /// Invalidates decisions already taken by the cost model.
1314   void invalidateCostModelingDecisions() {
1315     WideningDecisions.clear();
1316     Uniforms.clear();
1317     Scalars.clear();
1318   }
1319 
1320 private:
1321   unsigned NumPredStores = 0;
1322 
1323   /// \return An upper bound for the vectorization factor, larger than zero.
1324   /// One is returned if vectorization should best be avoided due to cost.
1325   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1326 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
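  /// For example (illustrative), a result of {8, false} would mean the
  /// operations cost 8 units in total but are all expected to be scalarized
  /// after type legalization, so this VF brings no real vector execution.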
1334   using VectorizationCostTy = std::pair<unsigned, bool>;
1335 
1336   /// Returns the expected execution cost. The unit of the cost does
1337   /// not matter because we use the 'cost' units to compare different
1338   /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor.
1340   VectorizationCostTy expectedCost(unsigned VF);
1341 
1342   /// Returns the execution time cost of an instruction for a given vector
1343   /// width. Vector width of one means scalar.
1344   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1345 
1346   /// The cost-computation logic from getInstructionCost which provides
1347   /// the vector type as an output parameter.
1348   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1349 
1350   /// Calculate vectorization cost of memory instruction \p I.
1351   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1352 
1353   /// The cost computation for scalarized memory instruction.
1354   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1355 
1356   /// The cost computation for interleaving group of memory instructions.
1357   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1358 
1359   /// The cost computation for Gather/Scatter instruction.
1360   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1361 
1362   /// The cost computation for widening instruction \p I with consecutive
1363   /// memory access.
1364   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1365 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
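  /// For example (illustrative), a load from a loop-invariant address such as
  ///   x = A[0];
  /// inside the loop is costed as one scalar load plus a broadcast of the
  /// loaded value into a vector.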
1370   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1371 
1372   /// Estimate the overhead of scalarizing an instruction. This is a
1373   /// convenience wrapper for the type-based getScalarizationOverhead API.
1374   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1375 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1378   bool isConsecutiveLoadOrStore(Instruction *I);
1379 
1380   /// Returns true if an artificially high cost for emulated masked memrefs
1381   /// should be used.
1382   bool useEmulatedMaskMemRefHack(Instruction *I);
1383 
1384   /// Map of scalar integer values to the smallest bitwidth they can be legally
1385   /// represented as. The vector equivalents of these values should be truncated
1386   /// to this type.
1387   MapVector<Instruction *, uint64_t> MinBWs;
1388 
1389   /// A type representing the costs for instructions if they were to be
1390   /// scalarized rather than vectorized. The entries are Instruction-Cost
1391   /// pairs.
1392   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1393 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1396   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1397 
1398   /// Records whether it is allowed to have the original scalar loop execute at
1399   /// least once. This may be needed as a fallback loop in case runtime
1400   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
1402   /// or as a peel-loop to handle gaps in interleave-groups.
1403   /// Under optsize and when the trip count is very small we don't allow any
1404   /// iterations to execute in the scalar loop.
1405   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1406 
1407   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1408   bool FoldTailByMasking = false;
1409 
1410   /// A map holding scalar costs for different vectorization factors. The
1411   /// presence of a cost for an instruction in the mapping indicates that the
1412   /// instruction will be scalarized when vectorizing with the associated
1413   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1414   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1415 
1416   /// Holds the instructions known to be uniform after vectorization.
1417   /// The data is collected per VF.
1418   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1419 
1420   /// Holds the instructions known to be scalar after vectorization.
1421   /// The data is collected per VF.
1422   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1423 
1424   /// Holds the instructions (address computations) that are forced to be
1425   /// scalarized.
1426   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1427 
1428   /// Returns the expected difference in cost from scalarizing the expression
1429   /// feeding a predicated instruction \p PredInst. The instructions to
1430   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1431   /// non-negative return value implies the expression will be scalarized.
1432   /// Currently, only single-use chains are considered for scalarization.
1433   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1434                               unsigned VF);
1435 
1436   /// Collect the instructions that are uniform after vectorization. An
1437   /// instruction is uniform if we represent it with a single scalar value in
1438   /// the vectorized loop corresponding to each vector iteration. Examples of
1439   /// uniform instructions include pointer operands of consecutive or
1440   /// interleaved memory accesses. Note that although uniformity implies an
1441   /// instruction will be scalar, the reverse is not true. In general, a
1442   /// scalarized instruction will be represented by VF scalar values in the
1443   /// vectorized loop, each corresponding to an iteration of the original
1444   /// scalar loop.
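  /// For example (illustrative), the GEP producing the address of a
  /// consecutive load is uniform: a single scalar address per unroll part
  /// feeds the wide load, so there is no need for VF copies of the GEP.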
1445   void collectLoopUniforms(unsigned VF);
1446 
1447   /// Collect the instructions that are scalar after vectorization. An
1448   /// instruction is scalar if it is known to be uniform or will be scalarized
1449   /// during vectorization. Non-uniform scalarized instructions will be
1450   /// represented by VF values in the vectorized loop, each corresponding to an
1451   /// iteration of the original scalar loop.
1452   void collectLoopScalars(unsigned VF);
1453 
1454   /// Keeps cost model vectorization decision and cost for instructions.
1455   /// Right now it is used for memory instructions only.
1456   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1457                                 std::pair<InstWidening, unsigned>>;
1458 
1459   DecisionList WideningDecisions;
1460 
1461   /// Returns true if \p V is expected to be vectorized and it needs to be
1462   /// extracted.
1463   bool needsExtract(Value *V, unsigned VF) const {
1464     Instruction *I = dyn_cast<Instruction>(V);
1465     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1466       return false;
1467 
1468     // Assume we can vectorize V (and hence we need extraction) if the
1469     // scalars are not computed yet. This can happen, because it is called
1470     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1471     // the scalars are collected. That should be a safe assumption in most
1472     // cases, because we check if the operands have vectorizable types
1473     // beforehand in LoopVectorizationLegality.
1474     return Scalars.find(VF) == Scalars.end() ||
1475            !isScalarAfterVectorization(I, VF);
  }
1477 
1478   /// Returns a range containing only operands needing to be extracted.
1479   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1480                                                    unsigned VF) {
1481     return SmallVector<Value *, 4>(make_filter_range(
1482         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1483   }
1484 
1485 public:
1486   /// The loop that we evaluate.
1487   Loop *TheLoop;
1488 
1489   /// Predicated scalar evolution analysis.
1490   PredicatedScalarEvolution &PSE;
1491 
1492   /// Loop Info analysis.
1493   LoopInfo *LI;
1494 
1495   /// Vectorization legality.
1496   LoopVectorizationLegality *Legal;
1497 
1498   /// Vector target information.
1499   const TargetTransformInfo &TTI;
1500 
1501   /// Target Library Info.
1502   const TargetLibraryInfo *TLI;
1503 
1504   /// Demanded bits analysis.
1505   DemandedBits *DB;
1506 
1507   /// Assumption cache.
1508   AssumptionCache *AC;
1509 
1510   /// Interface to emit optimization remarks.
1511   OptimizationRemarkEmitter *ORE;
1512 
  /// The function containing the loop being vectorized.
  const Function *TheFunction;
1514 
1515   /// Loop Vectorize Hint.
1516   const LoopVectorizeHints *Hints;
1517 
1518   /// The interleave access information contains groups of interleaved accesses
1519   /// with the same stride and close to each other.
1520   InterleavedAccessInfo &InterleaveInfo;
1521 
1522   /// Values to ignore in the cost model.
1523   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1524 
1525   /// Values to ignore in the cost model when VF > 1.
1526   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1527 };
1528 
1529 } // end namespace llvm
1530 
1531 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1532 // vectorization. The loop needs to be annotated with #pragma omp simd
1533 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1534 // vector length information is not provided, vectorization is not considered
1535 // explicit. Interleave hints are not allowed either. These limitations will be
1536 // relaxed in the future.
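// For example, an outer loop annotated with
//   #pragma omp simd simdlen(4)
// or
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// carries the vector length information required here.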
// Note that we are currently forced to abuse the pragma 'clang
1538 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1539 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1540 // provides *explicit vectorization hints* (LV can bypass legal checks and
1541 // assume that vectorization is legal). However, both hints are implemented
1542 // using the same metadata (llvm.loop.vectorize, processed by
1543 // LoopVectorizeHints). This will be fixed in the future when the native IR
1544 // representation for pragma 'omp simd' is introduced.
1545 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1546                                    OptimizationRemarkEmitter *ORE) {
1547   assert(!OuterLp->empty() && "This is not an outer loop");
1548   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1549 
1550   // Only outer loops with an explicit vectorization hint are supported.
1551   // Unannotated outer loops are ignored.
1552   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1553     return false;
1554 
1555   Function *Fn = OuterLp->getHeader()->getParent();
1556   if (!Hints.allowVectorization(Fn, OuterLp,
1557                                 true /*VectorizeOnlyWhenForced*/)) {
1558     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1559     return false;
1560   }
1561 
1562   if (Hints.getInterleave() > 1) {
1563     // TODO: Interleave support is future work.
1564     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1565                          "outer loops.\n");
1566     Hints.emitRemarkWithHints();
1567     return false;
1568   }
1569 
1570   return true;
1571 }
1572 
1573 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1574                                   OptimizationRemarkEmitter *ORE,
1575                                   SmallVectorImpl<Loop *> &V) {
1576   // Collect inner loops and outer loops without irreducible control flow. For
1577   // now, only collect outer loops that have explicit vectorization hints. If we
1578   // are stress testing the VPlan H-CFG construction, we collect the outermost
1579   // loop of every loop nest.
1580   if (L.empty() || VPlanBuildStressTest ||
1581       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1582     LoopBlocksRPO RPOT(&L);
1583     RPOT.perform(LI);
1584     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1585       V.push_back(&L);
1586       // TODO: Collect inner loops inside marked outer loops in case
1587       // vectorization fails for the outer loop. Do not invoke
1588       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1589       // already known to be reducible. We can use an inherited attribute for
1590       // that.
1591       return;
1592     }
1593   }
1594   for (Loop *InnerL : L)
1595     collectSupportedLoops(*InnerL, LI, ORE, V);
1596 }
1597 
1598 namespace {
1599 
1600 /// The LoopVectorize Pass.
1601 struct LoopVectorize : public FunctionPass {
1602   /// Pass identification, replacement for typeid
1603   static char ID;
1604 
1605   LoopVectorizePass Impl;
1606 
1607   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1608                          bool VectorizeOnlyWhenForced = false)
1609       : FunctionPass(ID),
1610         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1611     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1612   }
1613 
1614   bool runOnFunction(Function &F) override {
1615     if (skipFunction(F))
1616       return false;
1617 
1618     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1619     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1620     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1621     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1622     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1623     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1624     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1625     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1626     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1627     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1628     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1629     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1630     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1631 
1632     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1633         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1634 
1635     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1636                         GetLAA, *ORE, PSI).MadeAnyChange;
1637   }
1638 
1639   void getAnalysisUsage(AnalysisUsage &AU) const override {
1640     AU.addRequired<AssumptionCacheTracker>();
1641     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1642     AU.addRequired<DominatorTreeWrapperPass>();
1643     AU.addRequired<LoopInfoWrapperPass>();
1644     AU.addRequired<ScalarEvolutionWrapperPass>();
1645     AU.addRequired<TargetTransformInfoWrapperPass>();
1646     AU.addRequired<AAResultsWrapperPass>();
1647     AU.addRequired<LoopAccessLegacyAnalysis>();
1648     AU.addRequired<DemandedBitsWrapperPass>();
1649     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1650     AU.addRequired<InjectTLIMappingsLegacy>();
1651 
1652     // We currently do not preserve loopinfo/dominator analyses with outer loop
1653     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
1655     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1656     if (!EnableVPlanNativePath) {
1657       AU.addPreserved<LoopInfoWrapperPass>();
1658       AU.addPreserved<DominatorTreeWrapperPass>();
1659     }
1660 
1661     AU.addPreserved<BasicAAWrapperPass>();
1662     AU.addPreserved<GlobalsAAWrapperPass>();
1663     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1664   }
1665 };
1666 
1667 } // end anonymous namespace
1668 
1669 //===----------------------------------------------------------------------===//
1670 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1671 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1672 //===----------------------------------------------------------------------===//
1673 
1674 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
1678   Instruction *Instr = dyn_cast<Instruction>(V);
1679   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1680                      (!Instr ||
1681                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1682   // Place the code for broadcasting invariant variables in the new preheader.
1683   IRBuilder<>::InsertPointGuard Guard(Builder);
1684   if (SafeToHoist)
1685     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1686 
1687   // Broadcast the scalar into all locations in the vector.
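  // For VF = 4 this emits, roughly (illustrative IR):
  //   %splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef,
  //                          <4 x i32> zeroinitializer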
1688   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1689 
1690   return Shuf;
1691 }
1692 
1693 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1694     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1695   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1696          "Expected either an induction phi-node or a truncate of it!");
1697   Value *Start = II.getStartValue();
1698 
  // Construct the initial value of the vector IV in the vector loop
  // preheader.
1700   auto CurrIP = Builder.saveIP();
1701   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1702   if (isa<TruncInst>(EntryVal)) {
1703     assert(Start->getType()->isIntegerTy() &&
1704            "Truncation requires an integer type");
1705     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1706     Step = Builder.CreateTrunc(Step, TruncType);
1707     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1708   }
1709   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1710   Value *SteppedStart =
1711       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1712 
1713   // We create vector phi nodes for both integer and floating-point induction
1714   // variables. Here, we determine the kind of arithmetic we will perform.
1715   Instruction::BinaryOps AddOp;
1716   Instruction::BinaryOps MulOp;
1717   if (Step->getType()->isIntegerTy()) {
1718     AddOp = Instruction::Add;
1719     MulOp = Instruction::Mul;
1720   } else {
1721     AddOp = II.getInductionOpcode();
1722     MulOp = Instruction::FMul;
1723   }
1724 
1725   // Multiply the vectorization factor by the step using integer or
1726   // floating-point arithmetic as appropriate.
1727   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1728   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1729 
1730   // Create a vector splat to use in the induction update.
1731   //
1732   // FIXME: If the step is non-constant, we create the vector splat with
1733   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1734   //        handle a constant vector splat.
1735   Value *SplatVF =
1736       isa<Constant>(Mul)
1737           ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1738           : Builder.CreateVectorSplat(VF, Mul);
1739   Builder.restoreIP(CurrIP);
1740 
1741   // We may need to add the step a number of times, depending on the unroll
1742   // factor. The last of those goes into the PHI.
1743   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1744                                     &*LoopVectorBody->getFirstInsertionPt());
1745   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1746   Instruction *LastInduction = VecInd;
1747   for (unsigned Part = 0; Part < UF; ++Part) {
1748     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1749 
1750     if (isa<TruncInst>(EntryVal))
1751       addMetadata(LastInduction, EntryVal);
1752     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1753 
1754     LastInduction = cast<Instruction>(addFastMathFlag(
1755         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1756     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1757   }
1758 
1759   // Move the last step to the end of the latch block. This ensures consistent
1760   // placement of all induction updates.
1761   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1762   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1763   auto *ICmp = cast<Instruction>(Br->getCondition());
1764   LastInduction->moveBefore(ICmp);
1765   LastInduction->setName("vec.ind.next");
1766 
1767   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1768   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1769 }
1770 
1771 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1772   return Cost->isScalarAfterVectorization(I, VF) ||
1773          Cost->isProfitableToScalarize(I, VF);
1774 }
1775 
1776 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1777   if (shouldScalarizeInstruction(IV))
1778     return true;
1779   auto isScalarInst = [&](User *U) -> bool {
1780     auto *I = cast<Instruction>(U);
1781     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1782   };
1783   return llvm::any_of(IV->users(), isScalarInst);
1784 }
1785 
1786 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1787     const InductionDescriptor &ID, const Instruction *EntryVal,
1788     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1789   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1790          "Expected either an induction phi-node or a truncate of it!");
1791 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard).
  // It re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
1798   if (isa<TruncInst>(EntryVal))
1799     return;
1800 
1801   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1802   if (Casts.empty())
1803     return;
  // Only the first Cast instruction in the Casts vector is of interest. The
  // rest of the Casts (if any) have no uses outside the induction update
  // chain itself.
1807   Instruction *CastInst = *Casts.begin();
1808   if (Lane < UINT_MAX)
1809     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1810   else
1811     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1812 }
1813 
1814 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1815   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1816          "Primary induction variable must have an integer type");
1817 
1818   auto II = Legal->getInductionVars().find(IV);
1819   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1820 
1821   auto ID = II->second;
1822   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1823 
1824   // The value from the original loop to which we are mapping the new induction
1825   // variable.
1826   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1827 
1828   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1829 
1830   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
1832   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1833     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1834            "Induction step should be loop invariant");
1835     if (PSE.getSE()->isSCEVable(IV->getType())) {
1836       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1837       return Exp.expandCodeFor(Step, Step->getType(),
1838                                LoopVectorPreHeader->getTerminator());
1839     }
1840     return cast<SCEVUnknown>(Step)->getValue();
1841   };
1842 
1843   // The scalar value to broadcast. This is derived from the canonical
1844   // induction variable. If a truncation type is given, truncate the canonical
1845   // induction variable and step. Otherwise, derive these values from the
1846   // induction descriptor.
1847   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1848     Value *ScalarIV = Induction;
1849     if (IV != OldInduction) {
1850       ScalarIV = IV->getType()->isIntegerTy()
1851                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1852                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1853                                           IV->getType());
1854       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1855       ScalarIV->setName("offset.idx");
1856     }
1857     if (Trunc) {
1858       auto *TruncType = cast<IntegerType>(Trunc->getType());
1859       assert(Step->getType()->isIntegerTy() &&
1860              "Truncation requires an integer step");
1861       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1862       Step = Builder.CreateTrunc(Step, TruncType);
1863     }
1864     return ScalarIV;
1865   };
1866 
1867   // Create the vector values from the scalar IV, in the absence of creating a
1868   // vector IV.
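  // For example (illustrative), for VF = 4 and UF = 2 with scalar IV %iv and
  // step s, this produces
  //   part 0: <%iv, %iv+s, %iv+2*s, %iv+3*s>
  //   part 1: <%iv+4*s, %iv+5*s, %iv+6*s, %iv+7*s>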
1869   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1870     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1871     for (unsigned Part = 0; Part < UF; ++Part) {
1872       Value *EntryPart =
1873           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1874       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1875       if (Trunc)
1876         addMetadata(EntryPart, Trunc);
1877       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1878     }
1879   };
1880 
1881   // Now do the actual transformations, and start with creating the step value.
1882   Value *Step = CreateStepValue(ID.getStep());
1883   if (VF <= 1) {
1884     Value *ScalarIV = CreateScalarIV(Step);
1885     CreateSplatIV(ScalarIV, Step);
1886     return;
1887   }
1888 
1889   // Determine if we want a scalar version of the induction variable. This is
1890   // true if the induction variable itself is not widened, or if it has at
1891   // least one user in the loop that is not widened.
1892   auto NeedsScalarIV = needsScalarInduction(EntryVal);
1893   if (!NeedsScalarIV) {
1894     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1895     return;
1896   }
1897 
1898   // Try to create a new independent vector induction variable. If we can't
1899   // create the phi node, we will splat the scalar induction variable in each
1900   // loop iteration.
1901   if (!shouldScalarizeInstruction(EntryVal)) {
1902     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1903     Value *ScalarIV = CreateScalarIV(Step);
1904     // Create scalar steps that can be used by instructions we will later
1905     // scalarize. Note that the addition of the scalar steps will not increase
1906     // the number of instructions in the loop in the common case prior to
1907     // InstCombine. We will be trading one vector extract for each scalar step.
1908     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1909     return;
1910   }
1911 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
1915   Value *ScalarIV = CreateScalarIV(Step);
1916   if (!Cost->isScalarEpilogueAllowed())
1917     CreateSplatIV(ScalarIV, Step);
1918   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1919 }
1920 
1921 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1922                                           Instruction::BinaryOps BinOp) {
1923   // Create and check the types.
1924   auto *ValVTy = cast<VectorType>(Val->getType());
1925   int VLen = ValVTy->getNumElements();
1926 
1927   Type *STy = Val->getType()->getScalarType();
1928   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1929          "Induction Step must be an integer or FP");
1930   assert(Step->getType() == STy && "Step has wrong type");
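  // Lane i of the result is Val[i] op (StartIdx + i) * Step, where op is Add
  // for integers and the FP induction opcode (FAdd/FSub) otherwise; e.g. for
  // VF = 4, StartIdx = 0 and a splat Val = <x, x, x, x> with step s, this
  // returns <x, x+s, x+2*s, x+3*s>.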
1931 
1932   SmallVector<Constant *, 8> Indices;
1933 
1934   if (STy->isIntegerTy()) {
    // Create a vector of consecutive offsets starting at StartIdx.
1936     for (int i = 0; i < VLen; ++i)
1937       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1938 
1939     // Add the consecutive indices to the vector value.
1940     Constant *Cv = ConstantVector::get(Indices);
1941     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1942     Step = Builder.CreateVectorSplat(VLen, Step);
1943     assert(Step->getType() == Val->getType() && "Invalid step vec");
1944     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1945     // which can be found from the original scalar operations.
1946     Step = Builder.CreateMul(Cv, Step);
1947     return Builder.CreateAdd(Val, Step, "induction");
1948   }
1949 
1950   // Floating point induction.
1951   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1952          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive offsets starting at StartIdx.
1954   for (int i = 0; i < VLen; ++i)
1955     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1956 
1957   // Add the consecutive indices to the vector value.
1958   Constant *Cv = ConstantVector::get(Indices);
1959 
1960   Step = Builder.CreateVectorSplat(VLen, Step);
1961 
1962   // Floating point operations had to be 'fast' to enable the induction.
1963   FastMathFlags Flags;
1964   Flags.setFast();
1965 
1966   Value *MulOp = Builder.CreateFMul(Cv, Step);
1967   if (isa<Instruction>(MulOp))
    // Have to check: MulOp may have been folded to a constant, which cannot
    // carry fast-math flags.
1969     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1970 
1971   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1972   if (isa<Instruction>(BOp))
1973     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1974   return BOp;
1975 }
1976 
1977 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1978                                            Instruction *EntryVal,
1979                                            const InductionDescriptor &ID) {
1980   // We shouldn't have to build scalar steps if we aren't vectorizing.
1981   assert(VF > 1 && "VF should be greater than one");
1982 
1983   // Get the value type and ensure it and the step have the same integer type.
1984   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1985   assert(ScalarIVTy == Step->getType() &&
1986          "Val and Step should have the same type");
1987 
1988   // We build scalar steps for both integer and floating-point induction
1989   // variables. Here, we determine the kind of arithmetic we will perform.
1990   Instruction::BinaryOps AddOp;
1991   Instruction::BinaryOps MulOp;
1992   if (ScalarIVTy->isIntegerTy()) {
1993     AddOp = Instruction::Add;
1994     MulOp = Instruction::Mul;
1995   } else {
1996     AddOp = ID.getInductionOpcode();
1997     MulOp = Instruction::FMul;
1998   }
1999 
2000   // Determine the number of scalars we need to generate for each unroll
2001   // iteration. If EntryVal is uniform, we only need to generate the first
2002   // lane. Otherwise, we generate all VF values.
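  // For example (illustrative), with UF = 2, VF = 4 and integer step s, a
  // non-uniform EntryVal gets the scalars ScalarIV + {0,...,7} * s, lanes
  // {0..3} belonging to part 0 and lanes {4..7} to part 1.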
2003   unsigned Lanes =
2004       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2005                                                                          : VF;
2006   // Compute the scalar steps and save the results in VectorLoopValueMap.
2007   for (unsigned Part = 0; Part < UF; ++Part) {
2008     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2009       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2010       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2011       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2012       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2013       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2014     }
2015   }
2016 }
2017 
2018 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2019   assert(V != Induction && "The new induction variable should not be used.");
2020   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2021   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2022 
2023   // If we have a stride that is replaced by one, do it here. Defer this for
2024   // the VPlan-native path until we start running Legal checks in that path.
2025   if (!EnableVPlanNativePath && Legal->hasStride(V))
2026     V = ConstantInt::get(V->getType(), 1);
2027 
2028   // If we have a vector mapped to this value, return it.
2029   if (VectorLoopValueMap.hasVectorValue(V, Part))
2030     return VectorLoopValueMap.getVectorValue(V, Part);
2031 
2032   // If the value has not been vectorized, check if it has been scalarized
2033   // instead. If it has been scalarized, and we actually need the value in
2034   // vector form, we will construct the vector values on demand.
2035   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2036     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2037 
2038     // If we've scalarized a value, that value should be an instruction.
2039     auto *I = cast<Instruction>(V);
2040 
2041     // If we aren't vectorizing, we can just copy the scalar map values over to
2042     // the vector map.
2043     if (VF == 1) {
2044       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2045       return ScalarValue;
2046     }
2047 
2048     // Get the last scalar instruction we generated for V and Part. If the value
2049     // is known to be uniform after vectorization, this corresponds to lane zero
2050     // of the Part unroll iteration. Otherwise, the last instruction is the one
2051     // we created for the last vector lane of the Part unroll iteration.
2052     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2053     auto *LastInst = cast<Instruction>(
2054         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2055 
2056     // Set the insert point after the last scalarized instruction. This ensures
2057     // the insertelement sequence will directly follow the scalar definitions.
2058     auto OldIP = Builder.saveIP();
2059     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2060     Builder.SetInsertPoint(&*NewIP);
2061 
2062     // However, if we are vectorizing, we need to construct the vector values.
2063     // If the value is known to be uniform after vectorization, we can just
2064     // broadcast the scalar value corresponding to lane zero for each unroll
2065     // iteration. Otherwise, we construct the vector values using insertelement
2066     // instructions. Since the resulting vectors are stored in
2067     // VectorLoopValueMap, we will only generate the insertelements once.
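    // For example (illustrative IR), for VF = 4 the packing emits:
    //   %p0 = insertelement <4 x i32> undef, i32 %s0, i32 0
    //   %p1 = insertelement <4 x i32> %p0, i32 %s1, i32 1
    //   %p2 = insertelement <4 x i32> %p1, i32 %s2, i32 2
    //   %p3 = insertelement <4 x i32> %p2, i32 %s3, i32 3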
2068     Value *VectorValue = nullptr;
2069     if (Cost->isUniformAfterVectorization(I, VF)) {
2070       VectorValue = getBroadcastInstrs(ScalarValue);
2071       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2072     } else {
2073       // Initialize packing with insertelements to start from undef.
2074       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2075       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2076       for (unsigned Lane = 0; Lane < VF; ++Lane)
2077         packScalarIntoVectorValue(V, {Part, Lane});
2078       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2079     }
2080     Builder.restoreIP(OldIP);
2081     return VectorValue;
2082   }
2083 
2084   // If this scalar is unknown, assume that it is a constant or that it is
2085   // loop invariant. Broadcast V and save the value for future uses.
2086   Value *B = getBroadcastInstrs(V);
2087   VectorLoopValueMap.setVectorValue(V, Part, B);
2088   return B;
2089 }
2090 
2091 Value *
2092 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2093                                             const VPIteration &Instance) {
2094   // If the value is not an instruction contained in the loop, it should
2095   // already be scalar.
2096   if (OrigLoop->isLoopInvariant(V))
2097     return V;
2098 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2102 
2103   // If the value from the original loop has not been vectorized, it is
2104   // represented by UF x VF scalar values in the new loop. Return the requested
2105   // scalar value.
2106   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2107     return VectorLoopValueMap.getScalarValue(V, Instance);
2108 
2109   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2110   // for the given unroll part. If this entry is not a vector type (i.e., the
2111   // vectorization factor is one), there is no need to generate an
2112   // extractelement instruction.
2113   auto *U = getOrCreateVectorValue(V, Instance.Part);
2114   if (!U->getType()->isVectorTy()) {
2115     assert(VF == 1 && "Value not scalarized has non-vector type");
2116     return U;
2117   }
2118 
2119   // Otherwise, the value from the original loop has been vectorized and is
2120   // represented by UF vector values. Extract and return the requested scalar
2121   // value from the appropriate vector lane.
2122   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2123 }
2124 
2125 void InnerLoopVectorizer::packScalarIntoVectorValue(
2126     Value *V, const VPIteration &Instance) {
2127   assert(V != Induction && "The new induction variable should not be used.");
2128   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2129   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2130 
2131   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2132   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2133   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2134                                             Builder.getInt32(Instance.Lane));
2135   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2136 }
2137 
2138 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2139   assert(Vec->getType()->isVectorTy() && "Invalid type");
2140   SmallVector<int, 8> ShuffleMask;
2141   for (unsigned i = 0; i < VF; ++i)
2142     ShuffleMask.push_back(VF - i - 1);
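  // For example, for VF = 4 the mask is <3, 2, 1, 0>: lane i of the result is
  // lane VF - 1 - i of the input.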
2143 
2144   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2145                                      ShuffleMask, "reverse");
2146 }
2147 
2148 // Return whether we allow using masked interleave-groups (for dealing with
2149 // strided loads/stores that reside in predicated blocks, or for dealing
2150 // with gaps).
2151 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2152   // If an override option has been passed in for interleaved accesses, use it.
2153   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2154     return EnableMaskedInterleavedMemAccesses;
2155 
2156   return TTI.enableMaskedInterleavedAccessVectorization();
2157 }
2158 
2159 // Try to vectorize the interleave group that \p Instr belongs to.
2160 //
2161 // E.g. Translate following interleaved load group (factor = 3):
2162 //   for (i = 0; i < N; i+=3) {
2163 //     R = Pic[i];             // Member of index 0
2164 //     G = Pic[i+1];           // Member of index 1
2165 //     B = Pic[i+2];           // Member of index 2
2166 //     ... // do something to R, G, B
2167 //   }
2168 // To:
2169 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2170 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2171 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2172 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2173 //
2174 // Or translate following interleaved store group (factor = 3):
2175 //   for (i = 0; i < N; i+=3) {
2176 //     ... do something to R, G, B
2177 //     Pic[i]   = R;           // Member of index 0
2178 //     Pic[i+1] = G;           // Member of index 1
2179 //     Pic[i+2] = B;           // Member of index 2
2180 //   }
2181 // To:
2182 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2183 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2184 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2185 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2186 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2187 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2188     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2189     VPValue *Addr, VPValue *BlockInMask) {
2190   Instruction *Instr = Group->getInsertPos();
2191   const DataLayout &DL = Instr->getModule()->getDataLayout();
2192 
2193   // Prepare for the vector type of the interleaved load/store.
2194   Type *ScalarTy = getMemInstValueType(Instr);
2195   unsigned InterleaveFactor = Group->getFactor();
2196   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2197 
2198   // Prepare for the new pointers.
2199   SmallVector<Value *, 2> AddrParts;
2200   unsigned Index = Group->getIndex(Instr);
2201 
2202   // TODO: extend the masked interleaved-group support to reversed access.
2203   assert((!BlockInMask || !Group->isReverse()) &&
2204          "Reversed masked interleave-group not supported.");
2205 
2206   // If the group is reverse, adjust the index to refer to the last vector lane
2207   // instead of the first. We adjust the index from the first vector lane,
2208   // rather than directly getting the pointer for lane VF - 1, because the
2209   // pointer operand of the interleaved access is supposed to be uniform. For
2210   // uniform instructions, we're only required to generate a value for the
2211   // first vector lane in each unroll iteration.
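  // For example (illustrative), with VF = 4 and factor = 2 the adjustment
  // below adds (4 - 1) * 2 = 6, moving the group's base address from the
  // first to the last vector lane.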
2212   if (Group->isReverse())
2213     Index += (VF - 1) * Group->getFactor();
2214 
2215   for (unsigned Part = 0; Part < UF; Part++) {
2216     Value *AddrPart = State.get(Addr, {Part, 0});
2217     setDebugLocFromInst(Builder, AddrPart);
2218 
    // Note that the current instruction could be at any index in the group.
    // The address must be adjusted to that of the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2230 
2231     bool InBounds = false;
2232     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2233       InBounds = gep->isInBounds();
2234     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2235     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2236 
2237     // Cast to the vector pointer type.
2238     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2239     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2240     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2241   }
2242 
2243   setDebugLocFromInst(Builder, Instr);
2244   Value *UndefVec = UndefValue::get(VecTy);
2245 
2246   Value *MaskForGaps = nullptr;
2247   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2248     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2249     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2250   }
2251 
2252   // Vectorize the interleaved load group.
2253   if (isa<LoadInst>(Instr)) {
2254     // For each unroll part, create a wide load for the group.
2255     SmallVector<Value *, 2> NewLoads;
2256     for (unsigned Part = 0; Part < UF; Part++) {
2257       Instruction *NewLoad;
2258       if (BlockInMask || MaskForGaps) {
2259         assert(useMaskedInterleavedAccesses(*TTI) &&
2260                "masked interleaved groups are not allowed.");
2261         Value *GroupMask = MaskForGaps;
2262         if (BlockInMask) {
2263           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2264           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2265           Value *ShuffledMask = Builder.CreateShuffleVector(
2266               BlockInMaskPart, Undefs,
2267               createReplicatedMask(InterleaveFactor, VF), "interleaved.mask");
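          // For example (illustrative), for factor = 3 and VF = 4 the block
          // mask <m0, m1, m2, m3> is replicated to
          // <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>, so all members
          // of a tuple share their lane's predicate.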
2268           GroupMask = MaskForGaps
2269                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2270                                                 MaskForGaps)
2271                           : ShuffledMask;
2272         }
2273         NewLoad =
2274             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2275                                      GroupMask, UndefVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
2280       Group->addMetadata(NewLoad);
2281       NewLoads.push_back(NewLoad);
2282     }
2283 
2284     // For each member in the group, shuffle out the appropriate data from the
2285     // wide loads.
2286     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2287       Instruction *Member = Group->getMember(I);
2288 
2289       // Skip the gaps in the group.
2290       if (!Member)
2291         continue;
2292 
2293       auto StrideMask = createStrideMask(I, InterleaveFactor, VF);
2294       for (unsigned Part = 0; Part < UF; Part++) {
2295         Value *StridedVec = Builder.CreateShuffleVector(
2296             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2297 
        // If this member has a different type, cast the result to it.
2299         if (Member->getType() != ScalarTy) {
2300           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2301           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2302         }
2303 
2304         if (Group->isReverse())
2305           StridedVec = reverseVector(StridedVec);
2306 
2307         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2308       }
2309     }
2310     return;
2311   }
2312 
  // The sub-vector type for the current instruction.
2314   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2315 
2316   // Vectorize the interleaved store group.
2317   for (unsigned Part = 0; Part < UF; Part++) {
2318     // Collect the stored vector from each member.
2319     SmallVector<Value *, 4> StoredVecs;
2320     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
2322       Instruction *Member = Group->getMember(i);
2323       assert(Member && "Fail to get a member from an interleaved store group");
2324 
2325       Value *StoredVec = getOrCreateVectorValue(
2326           cast<StoreInst>(Member)->getValueOperand(), Part);
2327       if (Group->isReverse())
2328         StoredVec = reverseVector(StoredVec);
2329 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2333         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2334 
2335       StoredVecs.push_back(StoredVec);
2336     }
2337 
2338     // Concatenate all vectors into a wide vector.
2339     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2340 
2341     // Interleave the elements in the wide vector.
2342     Value *IVec = Builder.CreateShuffleVector(
2343         WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor),
2344         "interleaved.vec");
2345 
2346     Instruction *NewStoreInstr;
2347     if (BlockInMask) {
2348       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2349       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2350       Value *ShuffledMask = Builder.CreateShuffleVector(
2351           BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF),
2352           "interleaved.mask");
2353       NewStoreInstr = Builder.CreateMaskedStore(
2354           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2359 
2360     Group->addMetadata(NewStoreInstr);
2361   }
2362 }
2363 
2364 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2365                                                      VPTransformState &State,
2366                                                      VPValue *Addr,
2367                                                      VPValue *StoredValue,
2368                                                      VPValue *BlockInMask) {
  // Attempt to issue a wide load or store.
2370   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2371   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2372 
2373   assert((LI || SI) && "Invalid Load/Store instruction");
2374   assert((!SI || StoredValue) && "No stored value provided for widened store");
2375   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2376 
2377   LoopVectorizationCostModel::InstWidening Decision =
2378       Cost->getWideningDecision(Instr, VF);
2379   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2380           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2381           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2382          "CM decision is not to widen the memory instruction");
2383 
2384   Type *ScalarDataTy = getMemInstValueType(Instr);
2385   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2386   const Align Alignment = getLoadStoreAlignment(Instr);
2387 
2388   // Determine if the pointer operand of the access is either consecutive or
2389   // reverse consecutive.
2390   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2391   bool ConsecutiveStride =
2392       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2393   bool CreateGatherScatter =
2394       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2395 
2396   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2397   // gather/scatter. Otherwise Decision should have been to Scalarize.
2398   assert((ConsecutiveStride || CreateGatherScatter) &&
2399          "The instruction should be scalarized");
2400   (void)ConsecutiveStride;
2401 
2402   VectorParts BlockInMaskParts(UF);
2403   bool isMaskRequired = BlockInMask;
2404   if (isMaskRequired)
2405     for (unsigned Part = 0; Part < UF; ++Part)
2406       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2407 
2408   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2409     // Calculate the pointer for the specific unroll-part.
2410     GetElementPtrInst *PartPtr = nullptr;
2411 
2412     bool InBounds = false;
2413     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2414       InBounds = gep->isInBounds();
2415 
2416     if (Reverse) {
2417       // If the address is consecutive but reversed, then the
2418       // wide store needs to start at the last vector element.
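      // For example, with VF = 4 and Part = 1 the pointer is advanced by
      // -1 * 4 = -4 elements and then by 1 - 4 = -3 more, so the wide access
      // covers the four elements ending at this part's starting address; the
      // data vector itself is reversed separately.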
2419       PartPtr = cast<GetElementPtrInst>(
2420           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2421       PartPtr->setIsInBounds(InBounds);
2422       PartPtr = cast<GetElementPtrInst>(
2423           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2424       PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reversing an implicit all-ones mask is a no-op.
2426         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2427     } else {
2428       PartPtr = cast<GetElementPtrInst>(
2429           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2430       PartPtr->setIsInBounds(InBounds);
2431     }
2432 
2433     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2434     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2435   };
2436 
2437   // Handle Stores:
2438   if (SI) {
2439     setDebugLocFromInst(Builder, SI);
2440 
2441     for (unsigned Part = 0; Part < UF; ++Part) {
2442       Instruction *NewSI = nullptr;
2443       Value *StoredVal = State.get(StoredValue, Part);
2444       if (CreateGatherScatter) {
2445         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2446         Value *VectorGep = State.get(Addr, Part);
2447         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2448                                             MaskPart);
2449       } else {
2450         if (Reverse) {
2451           // If we store to reverse consecutive memory locations, then we need
2452           // to reverse the order of elements in the stored value.
2453           StoredVal = reverseVector(StoredVal);
2454           // We don't want to update the value in the map as it might be used in
2455           // another expression. So don't call resetVectorValue(StoredVal).
2456         }
2457         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2458         if (isMaskRequired)
2459           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2460                                             BlockInMaskParts[Part]);
2461         else
2462           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2463       }
2464       addMetadata(NewSI, SI);
2465     }
2466     return;
2467   }
2468 
2469   // Handle loads.
2470   assert(LI && "Must have a load instruction");
2471   setDebugLocFromInst(Builder, LI);
2472   for (unsigned Part = 0; Part < UF; ++Part) {
2473     Value *NewLI;
2474     if (CreateGatherScatter) {
2475       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2476       Value *VectorGep = State.get(Addr, Part);
2477       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2478                                          nullptr, "wide.masked.gather");
2479       addMetadata(NewLI, LI);
2480     } else {
2481       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2482       if (isMaskRequired)
2483         NewLI = Builder.CreateMaskedLoad(
2484             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2485             "wide.masked.load");
2486       else
2487         NewLI =
2488             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2489 
      // Add metadata to the load, but record the reversed shuffle as the
      // vector value.
2491       addMetadata(NewLI, LI);
2492       if (Reverse)
2493         NewLI = reverseVector(NewLI);
2494     }
2495     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2496   }
2497 }
2498 
2499 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2500                                                const VPIteration &Instance,
2501                                                bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() &&
         "Can't handle aggregate types");
2503 
2504   setDebugLocFromInst(Builder, Instr);
2505 
  // Does this instruction return a value?
2507   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2508 
2509   Instruction *Cloned = Instr->clone();
2510   if (!IsVoidRetTy)
2511     Cloned->setName(Instr->getName() + ".cloned");
2512 
2513   // Replace the operands of the cloned instructions with their scalar
2514   // equivalents in the new loop.
2515   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2516     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2517     Cloned->setOperand(op, NewOp);
2518   }
2519   addNewMetadata(Cloned, Instr);
2520 
2521   // Place the cloned scalar in the new loop.
2522   Builder.Insert(Cloned);
2523 
2524   // Add the cloned scalar to the scalar map entry.
2525   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2526 
  // If we just cloned a new assumption, add it to the assumption cache.
2528   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2529     if (II->getIntrinsicID() == Intrinsic::assume)
2530       AC->registerAssumption(II);
2531 
2532   // End if-block.
2533   if (IfPredicateInstr)
2534     PredicatedInstructions.push_back(Cloned);
2535 }
2536 
2537 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2538                                                       Value *End, Value *Step,
2539                                                       Instruction *DL) {
2540   BasicBlock *Header = L->getHeader();
2541   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible that no latch exists
  // yet. If so, use the header: this will be a single-block loop.
2544   if (!Latch)
2545     Latch = Header;
2546 
2547   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2548   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2549   setDebugLocFromInst(Builder, OldInst);
2550   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2551 
2552   Builder.SetInsertPoint(Latch->getTerminator());
2553   setDebugLocFromInst(Builder, OldInst);
2554 
2555   // Create i+1 and fill the PHINode.
2556   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2557   Induction->addIncoming(Start, L->getLoopPreheader());
2558   Induction->addIncoming(Next, Latch);
2559   // Create the compare.
2560   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2561   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2562 
2563   // Now we have two terminators. Remove the old one from the block.
2564   Latch->getTerminator()->eraseFromParent();
2565 
2566   return Induction;
2567 }
2568 
2569 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2570   if (TripCount)
2571     return TripCount;
2572 
2573   assert(L && "Create Trip Count for null loop.");
2574   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2575   // Find the loop boundaries.
2576   ScalarEvolution *SE = PSE.getSE();
2577   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2578   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2579          "Invalid loop count");
2580 
2581   Type *IdxTy = Legal->getWidestInductionType();
2582   assert(IdxTy && "No type for induction");
2583 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the
  // compare. The only way we could have obtained a backedge-taken count here
  // is if the induction variable was signed and therefore does not overflow,
  // in which case the truncation is legal.
2589   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2590       IdxTy->getPrimitiveSizeInBits())
2591     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2592   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2593 
2594   // Get the total trip count from the count by adding 1.
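  // (A loop over i = 0 .. N-1 has a backedge-taken count of N-1 and an
  // overall trip count of N.)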
2595   const SCEV *ExitCount = SE->getAddExpr(
2596       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2597 
2598   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2599 
2600   // Expand the trip count and place the new instructions in the preheader.
2601   // Notice that the pre-header does not change, only the loop body.
2602   SCEVExpander Exp(*SE, DL, "induction");
2603 
2604   // Count holds the overall loop count (N).
2605   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2606                                 L->getLoopPreheader()->getTerminator());
2607 
2608   if (TripCount->getType()->isPointerTy())
2609     TripCount =
2610         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2611                                     L->getLoopPreheader()->getTerminator());
2612 
2613   return TripCount;
2614 }
2615 
2616 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2617   if (VectorTripCount)
2618     return VectorTripCount;
2619 
2620   Value *TC = getOrCreateTripCount(L);
2621   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2622 
2623   Type *Ty = TC->getType();
2624   Constant *Step = ConstantInt::get(Ty, VF * UF);
2625 
2626   // If the tail is to be folded by masking, round the number of iterations N
2627   // up to a multiple of Step instead of rounding down. This is done by first
2628   // adding Step-1 and then rounding down. Note that it's ok if this addition
2629   // overflows: the vector induction variable will eventually wrap to zero given
2630   // that it starts at zero and its Step is a power of two; the loop will then
2631   // exit, with the last early-exit vector comparison also producing all-true.
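  // For example, a trip count of 10 with VF * UF = 4 is rounded up to 13
  // here and then down to a vector trip count of 12 below, so the final
  // (masked) vector iteration covers only two real iterations.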
2632   if (Cost->foldTailByMasking()) {
2633     assert(isPowerOf2_32(VF * UF) &&
2634            "VF*UF must be a power of 2 when folding tail by masking");
2635     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2636   }
2637 
2638   // Now we need to generate the expression for the part of the loop that the
2639   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2640   // iterations are not required for correctness, or N - Step, otherwise. Step
2641   // is equal to the vectorization factor (number of SIMD elements) times the
2642   // unroll factor (number of SIMD instructions).
2643   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2644 
2645   // If there is a non-reversed interleaved group that may speculatively access
2646   // memory out-of-bounds, we need to ensure that there will be at least one
2647   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2648   // the trip count, we set the remainder to be equal to the step. If the step
2649   // does not evenly divide the trip count, no adjustment is necessary since
2650   // there will already be scalar iterations. Note that the minimum iterations
2651   // check ensures that N >= Step.
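  // For example, if N = 8 and Step = 4, R is bumped from 0 to 4 so that the
  // vector loop covers only four iterations and the epilogue runs the rest.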
2652   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2653     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2654     R = Builder.CreateSelect(IsZero, Step, R);
2655   }
2656 
2657   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2658 
2659   return VectorTripCount;
2660 }
2661 
2662 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2663                                                    const DataLayout &DL) {
2664   // Verify that V is a vector type with same number of elements as DstVTy.
2665   unsigned VF = DstVTy->getNumElements();
2666   VectorType *SrcVecTy = cast<VectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2668   Type *SrcElemTy = SrcVecTy->getElementType();
2669   Type *DstElemTy = DstVTy->getElementType();
2670   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2671          "Vector elements must have same size");
2672 
2673   // Do a direct cast if element types are castable.
2674   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2675     return Builder.CreateBitOrPointerCast(V, DstVTy);
2676   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice versa. Handle it with a two-step cast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
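  // For example, on a typical 64-bit target a <4 x double> value destined for
  // a <4 x i8*> type is first bitcast to <4 x i64> and then converted to the
  // pointer vector (and similarly in the opposite direction).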
2681   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2682          "Only one type should be a pointer type");
2683   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2684          "Only one type should be a floating point type");
2685   Type *IntTy =
2686       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2687   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2688   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2689   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2690 }
2691 
2692 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2693                                                          BasicBlock *Bypass) {
2694   Value *Count = getOrCreateTripCount(L);
2695   // Reuse existing vector loop preheader for TC checks.
2696   // Note that new preheader block is generated for vector loop.
2697   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2698   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2699 
2700   // Generate code to check if the loop's trip count is less than VF * UF, or
2701   // equal to it in case a scalar epilogue is required; this implies that the
2702   // vector trip count is zero. This check also covers the case where adding one
2703   // to the backedge-taken count overflowed leading to an incorrect trip count
2704   // of zero. In this case we will also jump to the scalar loop.
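  // For example, with VF = 4, UF = 2 and no scalar epilogue, this emits
  // roughly:
  //   %min.iters.check = icmp ult i64 %count, 8
  //   br i1 %min.iters.check, label %scalar.ph, label %vector.ph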
2705   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2706                                           : ICmpInst::ICMP_ULT;
2707 
2708   // If tail is to be folded, vector loop takes care of all iterations.
2709   Value *CheckMinIters = Builder.getFalse();
2710   if (!Cost->foldTailByMasking())
2711     CheckMinIters = Builder.CreateICmp(
2712         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2713         "min.iters.check");
2714 
2715   // Create new preheader for vector loop.
2716   LoopVectorPreHeader =
2717       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2718                  "vector.ph");
2719 
2720   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2721                                DT->getNode(Bypass)->getIDom()) &&
2722          "TC check is expected to dominate Bypass");
2723 
2724   // Update dominator for Bypass & LoopExit.
2725   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2726   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2727 
2728   ReplaceInstWithInst(
2729       TCCheckBlock->getTerminator(),
2730       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2731   LoopBypassBlocks.push_back(TCCheckBlock);
2732 }
2733 
2734 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2735   // Reuse existing vector loop preheader for SCEV checks.
2736   // Note that new preheader block is generated for vector loop.
2737   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2738 
  // Generate the code to check the SCEV assumptions that we made.
2740   // We want the new basic block to start at the first instruction in a
2741   // sequence of instructions that form a check.
2742   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2743                    "scev.check");
2744   Value *SCEVCheck = Exp.expandCodeForPredicate(
2745       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2746 
2747   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2748     if (C->isZero())
2749       return;
2750 
2751   assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2752          "Cannot SCEV check stride or overflow when optimizing for size");
2753 
2754   SCEVCheckBlock->setName("vector.scevcheck");
2755   // Create new preheader for vector loop.
2756   LoopVectorPreHeader =
2757       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2758                  nullptr, "vector.ph");
2759 
2760   // Update dominator only if this is first RT check.
2761   if (LoopBypassBlocks.empty()) {
2762     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2763     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2764   }
2765 
2766   ReplaceInstWithInst(
2767       SCEVCheckBlock->getTerminator(),
2768       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2769   LoopBypassBlocks.push_back(SCEVCheckBlock);
2770   AddedSafetyChecks = true;
2771 }
2772 
2773 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2774   // VPlan-native path does not do any analysis for runtime checks currently.
2775   if (EnableVPlanNativePath)
2776     return;
2777 
2778   // Reuse existing vector loop preheader for runtime memory checks.
2779   // Note that new preheader block is generated for vector loop.
2780   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2781 
2782   // Generate the code that checks in runtime if arrays overlap. We put the
2783   // checks into a separate block to make the more common case of few elements
2784   // faster.
2785   auto *LAI = Legal->getLAI();
2786   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
2787   if (!RtPtrChecking.Need)
2788     return;
2789   Instruction *FirstCheckInst;
2790   Instruction *MemRuntimeCheck;
2791   std::tie(FirstCheckInst, MemRuntimeCheck) =
2792       addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
2793                        RtPtrChecking.getChecks(), RtPtrChecking.getSE());
2794   if (!MemRuntimeCheck)
2795     return;
2796 
2797   if (MemCheckBlock->getParent()->hasOptSize()) {
2798     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2799            "Cannot emit memory checks when optimizing for size, unless forced "
2800            "to vectorize.");
2801     ORE->emit([&]() {
2802       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2803                                         L->getStartLoc(), L->getHeader())
2804              << "Code-size may be reduced by not forcing "
2805                 "vectorization, or by source-code modifications "
2806                 "eliminating the need for runtime checks "
2807                 "(e.g., adding 'restrict').";
2808     });
2809   }
2810 
2811   MemCheckBlock->setName("vector.memcheck");
2812   // Create new preheader for vector loop.
2813   LoopVectorPreHeader =
2814       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2815                  "vector.ph");
2816 
2817   // Update dominator only if this is first RT check.
2818   if (LoopBypassBlocks.empty()) {
2819     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2820     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2821   }
2822 
2823   ReplaceInstWithInst(
2824       MemCheckBlock->getTerminator(),
2825       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2826   LoopBypassBlocks.push_back(MemCheckBlock);
2827   AddedSafetyChecks = true;
2828 
2829   // We currently don't use LoopVersioning for the actual loop cloning but we
2830   // still use it to add the noalias metadata.
2831   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2832                                           PSE.getSE());
2833   LVer->prepareNoAliasMetadata();
2834 }
2835 
2836 Value *InnerLoopVectorizer::emitTransformedIndex(
2837     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2838     const InductionDescriptor &ID) const {
2839 
2840   SCEVExpander Exp(*SE, DL, "induction");
2841   auto Step = ID.getStep();
2842   auto StartValue = ID.getStartValue();
2843   assert(Index->getType() == Step->getType() &&
2844          "Index type does not match StepValue type");
2845 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // better code. Unfortunately, attempting to do so on invalid IR may lead
  // to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle some trivial
  // cases only.
2852   auto CreateAdd = [&B](Value *X, Value *Y) {
2853     assert(X->getType() == Y->getType() && "Types don't match!");
2854     if (auto *CX = dyn_cast<ConstantInt>(X))
2855       if (CX->isZero())
2856         return Y;
2857     if (auto *CY = dyn_cast<ConstantInt>(Y))
2858       if (CY->isZero())
2859         return X;
2860     return B.CreateAdd(X, Y);
2861   };
2862 
2863   auto CreateMul = [&B](Value *X, Value *Y) {
2864     assert(X->getType() == Y->getType() && "Types don't match!");
2865     if (auto *CX = dyn_cast<ConstantInt>(X))
2866       if (CX->isOne())
2867         return Y;
2868     if (auto *CY = dyn_cast<ConstantInt>(Y))
2869       if (CY->isOne())
2870         return X;
2871     return B.CreateMul(X, Y);
2872   };
2873 
2874   switch (ID.getKind()) {
2875   case InductionDescriptor::IK_IntInduction: {
2876     assert(Index->getType() == StartValue->getType() &&
2877            "Index type does not match StartValue type");
2878     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2879       return B.CreateSub(StartValue, Index);
2880     auto *Offset = CreateMul(
2881         Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
2882     return CreateAdd(StartValue, Offset);
2883   }
2884   case InductionDescriptor::IK_PtrInduction: {
2885     assert(isa<SCEVConstant>(Step) &&
2886            "Expected constant step for pointer induction");
2887     return B.CreateGEP(
2888         StartValue->getType()->getPointerElementType(), StartValue,
2889         CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
2890                                            &*B.GetInsertPoint())));
2891   }
2892   case InductionDescriptor::IK_FpInduction: {
2893     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2894     auto InductionBinOp = ID.getInductionBinOp();
2895     assert(InductionBinOp &&
2896            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2897             InductionBinOp->getOpcode() == Instruction::FSub) &&
2898            "Original bin op should be defined for FP induction");
2899 
2900     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2901 
2902     // Floating point operations had to be 'fast' to enable the induction.
2903     FastMathFlags Flags;
2904     Flags.setFast();
2905 
2906     Value *MulExp = B.CreateFMul(StepValue, Index);
2907     if (isa<Instruction>(MulExp))
      // We have to check: MulExp may have been folded to a constant.
2909       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2910 
2911     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2912                                "induction");
2913     if (isa<Instruction>(BOp))
2914       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2915 
2916     return BOp;
2917   }
2918   case InductionDescriptor::IK_NoInduction:
2919     return nullptr;
2920   }
2921   llvm_unreachable("invalid enum");
2922 }
2923 
2924 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2925   /*
2926    In this function we generate a new loop. The new loop will contain
2927    the vectorized instructions while the old loop will continue to run the
2928    scalar remainder.
2929 
2930        [ ] <-- loop iteration number check.
2931     /   |
2932    /    v
2933   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2934   |  /  |
2935   | /   v
2936   ||   [ ]     <-- vector pre header.
2937   |/    |
2938   |     v
2939   |    [  ] \
2940   |    [  ]_|   <-- vector loop.
2941   |     |
2942   |     v
2943   |   -[ ]   <--- middle-block.
2944   |  /  |
2945   | /   v
2946   -|- >[ ]     <--- new preheader.
2947    |    |
2948    |    v
2949    |   [ ] \
2950    |   [ ]_|   <-- old scalar loop to handle remainder.
2951     \   |
2952      \  v
2953       >[ ]     <-- exit block.
2954    ...
2955    */
2956 
2957   MDNode *OrigLoopID = OrigLoop->getLoopID();
2958 
2959   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
2961   // induction variables. In the code below we also support a case where we
2962   // don't have a single induction variable.
2963   //
2964   // We try to obtain an induction variable from the original loop as hard
2965   // as possible. However if we don't find one that:
2966   //   - is an integer
2967   //   - counts from zero, stepping by one
2968   //   - is the size of the widest induction variable type
2969   // then we create a new one.
2970   OldInduction = Legal->getPrimaryInduction();
2971   Type *IdxTy = Legal->getWidestInductionType();
2972 
2973   // Split the single block loop into the two loop structure described above.
2974   LoopScalarBody = OrigLoop->getHeader();
2975   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2976   LoopExitBlock = OrigLoop->getExitBlock();
2977   assert(LoopExitBlock && "Must have an exit block");
2978   assert(LoopVectorPreHeader && "Invalid loop structure");
2979 
2980   LoopMiddleBlock =
2981       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2982                  LI, nullptr, "middle.block");
2983   LoopScalarPreHeader =
2984       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
2985                  nullptr, "scalar.ph");
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
2989   LoopVectorBody =
2990       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2991                  nullptr, nullptr, "vector.body");
2992 
2993   // Update dominator for loop exit.
2994   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
2995 
2996   // Create and register the new vector loop.
2997   Loop *Lp = LI->AllocateLoop();
2998   Loop *ParentLoop = OrigLoop->getParentLoop();
2999 
3000   // Insert the new loop into the loop nest and register the new basic blocks
3001   // before calling any utilities such as SCEV that require valid LoopInfo.
3002   if (ParentLoop) {
3003     ParentLoop->addChildLoop(Lp);
3004   } else {
3005     LI->addTopLevelLoop(Lp);
3006   }
3007   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3008 
3009   // Find the loop boundaries.
3010   Value *Count = getOrCreateTripCount(Lp);
3011 
3012   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3013 
3014   // Now, compare the new count to zero. If it is zero skip the vector loop and
3015   // jump to the scalar loop. This check also covers the case where the
3016   // backedge-taken count is uint##_max: adding one to it will overflow leading
3017   // to an incorrect trip count of zero. In this (rare) case we will also jump
3018   // to the scalar loop.
3019   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3020 
3021   // Generate the code to check any assumptions that we've made for SCEV
3022   // expressions.
3023   emitSCEVChecks(Lp, LoopScalarPreHeader);
3024 
3025   // Generate the code that checks in runtime if arrays overlap. We put the
3026   // checks into a separate block to make the more common case of few elements
3027   // faster.
3028   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3029 
3030   // Generate the induction variable.
3031   // The loop step is equal to the vectorization factor (num of SIMD elements)
3032   // times the unroll factor (num of SIMD instructions).
3033   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3034   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3035   Induction =
3036       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3037                               getDebugLocFromInstOrOperands(OldInduction));
3038 
3039   // We are going to resume the execution of the scalar loop.
3040   // Go over all of the induction variables that we found and fix the
3041   // PHIs that are left in the scalar version of the loop.
3042   // The starting values of PHI nodes depend on the counter of the last
3043   // iteration in the vectorized loop.
3044   // If we come from a bypass edge then we need to start from the original
3045   // start value.
3046 
  // The resume values created below save the new starting index for the
  // scalar loop. They are used to test whether there are any tail iterations
  // left once the vector loop has completed.
3050   for (auto &InductionEntry : Legal->getInductionVars()) {
3051     PHINode *OrigPhi = InductionEntry.first;
3052     InductionDescriptor II = InductionEntry.second;
3053 
    // Create phi nodes to merge from the backedge-taken check block.
3055     PHINode *BCResumeVal =
3056         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3057                         LoopScalarPreHeader->getTerminator());
3058     // Copy original phi DL over to the new one.
3059     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3060     Value *&EndValue = IVEndValues[OrigPhi];
3061     if (OrigPhi == OldInduction) {
3062       // We know what the end value is.
3063       EndValue = CountRoundDown;
3064     } else {
3065       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3066       Type *StepType = II.getStep()->getType();
3067       Instruction::CastOps CastOp =
3068           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3069       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3070       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3071       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3072       EndValue->setName("ind.end");
3073     }
3074 
3075     // The new PHI merges the original incoming value, in case of a bypass,
3076     // or the value at the end of the vectorized loop.
3077     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3078 
3079     // Fix the scalar body counter (PHI node).
3080     // The old induction's phi node in the scalar body needs the truncated
3081     // value.
3082     for (BasicBlock *BB : LoopBypassBlocks)
3083       BCResumeVal->addIncoming(II.getStartValue(), BB);
3084     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3085   }
3086 
3087   // We need the OrigLoop (scalar loop part) latch terminator to help
3088   // produce correct debug info for the middle block BB instructions.
3089   // The legality check stage guarantees that the loop will have a single
3090   // latch.
3091   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3092          "Scalar loop latch terminator isn't a branch");
3093   BranchInst *ScalarLatchBr =
3094       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3095 
3096   // Add a check in the middle block to see if we have completed
3097   // all of the iterations in the first vector loop.
3098   // If (N - N%VF) == N, then we *don't* need to run the remainder.
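  // (For example, with N = 12 and VF * UF = 4, N - N % 4 == 12 == N, so no
  // scalar iterations remain.)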
3099   // If tail is to be folded, we know we don't need to run the remainder.
3100   Value *CmpN = Builder.getTrue();
3101   if (!Cost->foldTailByMasking()) {
3102     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3103                            CountRoundDown, "cmp.n",
3104                            LoopMiddleBlock->getTerminator());
3105 
3106     // Here we use the same DebugLoc as the scalar loop latch branch instead
3107     // of the corresponding compare because they may have ended up with
3108     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3110     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3111   }
3112 
3113   BranchInst *BrInst =
3114       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3115   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3116   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3117 
3118   // Get ready to start creating new instructions into the vectorized body.
3119   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3120          "Inconsistent vector loop preheader");
3121   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3122 
3123   Optional<MDNode *> VectorizedLoopID =
3124       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3125                                       LLVMLoopVectorizeFollowupVectorized});
3126   if (VectorizedLoopID.hasValue()) {
3127     Lp->setLoopID(VectorizedLoopID.getValue());
3128 
3129     // Do not setAlreadyVectorized if loop attributes have been defined
3130     // explicitly.
3131     return LoopVectorPreHeader;
3132   }
3133 
3134   // Keep all loop hints from the original loop on the vector loop (we'll
3135   // replace the vectorizer-specific hints below).
3136   if (MDNode *LID = OrigLoop->getLoopID())
3137     Lp->setLoopID(LID);
3138 
3139   LoopVectorizeHints Hints(Lp, true, *ORE);
3140   Hints.setAlreadyVectorized();
3141 
3142 #ifdef EXPENSIVE_CHECKS
3143   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3144   LI->verify(*DT);
3145 #endif
3146 
3147   return LoopVectorPreHeader;
3148 }
3149 
3150 // Fix up external users of the induction variable. At this point, we are
3151 // in LCSSA form, with all external PHIs that use the IV having one input value,
3152 // coming from the remainder loop. We need those PHIs to also have a correct
3153 // value for the IV when arriving directly from the middle block.
3154 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3155                                        const InductionDescriptor &II,
3156                                        Value *CountRoundDown, Value *EndValue,
3157                                        BasicBlock *MiddleBlock) {
3158   // There are two kinds of external IV usages - those that use the value
3159   // computed in the last iteration (the PHI) and those that use the penultimate
3160   // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
3162 
3163   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3164 
3165   DenseMap<Value *, Value *> MissingVals;
3166 
3167   // An external user of the last iteration's value should see the value that
3168   // the remainder loop uses to initialize its own IV.
3169   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3170   for (User *U : PostInc->users()) {
3171     Instruction *UI = cast<Instruction>(U);
3172     if (!OrigLoop->contains(UI)) {
3173       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3174       MissingVals[UI] = EndValue;
3175     }
3176   }
3177 
  // An external user of the penultimate value needs to see EndValue - Step.
3179   // The simplest way to get this is to recompute it from the constituent SCEVs,
3180   // that is Start + (Step * (CRD - 1)).
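  // For example, an IV with start 0, step 2 and CountRoundDown (CRD) = 8
  // gives escaping users the value 0 + 2 * (8 - 1) = 14.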
3181   for (User *U : OrigPhi->users()) {
3182     auto *UI = cast<Instruction>(U);
3183     if (!OrigLoop->contains(UI)) {
3184       const DataLayout &DL =
3185           OrigLoop->getHeader()->getModule()->getDataLayout();
3186       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3187 
3188       IRBuilder<> B(MiddleBlock->getTerminator());
3189       Value *CountMinusOne = B.CreateSub(
3190           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3191       Value *CMO =
3192           !II.getStep()->getType()->isIntegerTy()
3193               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3194                              II.getStep()->getType())
3195               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3196       CMO->setName("cast.cmo");
3197       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3198       Escape->setName("ind.escape");
3199       MissingVals[UI] = Escape;
3200     }
3201   }
3202 
3203   for (auto &I : MissingVals) {
3204     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3206     // that is %IV2 = phi [...], [ %IV1, %latch ]
3207     // In this case, if IV1 has an external use, we need to avoid adding both
3208     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3209     // don't already have an incoming value for the middle block.
3210     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3211       PHI->addIncoming(I.second, MiddleBlock);
3212   }
3213 }
3214 
3215 namespace {
3216 
3217 struct CSEDenseMapInfo {
3218   static bool canHandle(const Instruction *I) {
3219     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3220            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3221   }
3222 
3223   static inline Instruction *getEmptyKey() {
3224     return DenseMapInfo<Instruction *>::getEmptyKey();
3225   }
3226 
3227   static inline Instruction *getTombstoneKey() {
3228     return DenseMapInfo<Instruction *>::getTombstoneKey();
3229   }
3230 
3231   static unsigned getHashValue(const Instruction *I) {
3232     assert(canHandle(I) && "Unknown instruction!");
3233     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3234                                                            I->value_op_end()));
3235   }
3236 
3237   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3238     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3239         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3240       return LHS == RHS;
3241     return LHS->isIdenticalTo(RHS);
3242   }
3243 };
3244 
3245 } // end anonymous namespace
3246 
/// Perform CSE of induction variable instructions.
3248 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3250   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3251   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3252     Instruction *In = &*I++;
3253 
3254     if (!CSEDenseMapInfo::canHandle(In))
3255       continue;
3256 
3257     // Check if we can replace this instruction with any of the
3258     // visited instructions.
3259     if (Instruction *V = CSEMap.lookup(In)) {
3260       In->replaceAllUsesWith(V);
3261       In->eraseFromParent();
3262       continue;
3263     }
3264 
3265     CSEMap[In] = In;
3266   }
3267 }
3268 
3269 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3270                                                        unsigned VF,
3271                                                        bool &NeedToScalarize) {
3272   Function *F = CI->getCalledFunction();
3273   Type *ScalarRetTy = CI->getType();
3274   SmallVector<Type *, 4> Tys, ScalarTys;
3275   for (auto &ArgOp : CI->arg_operands())
3276     ScalarTys.push_back(ArgOp->getType());
3277 
3278   // Estimate cost of scalarized vector call. The source operands are assumed
3279   // to be vectors, so we need to extract individual elements from there,
3280   // execute VF scalar calls, and then gather the result into the vector return
3281   // value.
3282   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
3283                                                  TTI::TCK_RecipThroughput);
3284   if (VF == 1)
3285     return ScalarCallCost;
3286 
3287   // Compute corresponding vector type for return value and arguments.
3288   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3289   for (Type *ScalarTy : ScalarTys)
3290     Tys.push_back(ToVectorTy(ScalarTy, VF));
3291 
3292   // Compute costs of unpacking argument values for the scalar calls and
3293   // packing the return values to a vector.
3294   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3295 
3296   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
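  // For example, with VF = 4, a scalar call cost of 10 and a scalarization
  // overhead of 6, the cost just computed is 4 * 10 + 6 = 46; a cheaper
  // vector variant found below will be preferred over it.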
3297 
3298   // If we can't emit a vector call for this function, then the currently found
3299   // cost is the cost we need to return.
3300   NeedToScalarize = true;
3301   VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3302   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3303 
3304   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3305     return Cost;
3306 
3307   // If the corresponding vector cost is cheaper, return its cost.
3308   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys,
3309                                                  TTI::TCK_RecipThroughput);
3310   if (VectorCallCost < Cost) {
3311     NeedToScalarize = false;
3312     return VectorCallCost;
3313   }
3314   return Cost;
3315 }
3316 
3317 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3318                                                             unsigned VF) {
3319   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3320   assert(ID && "Expected intrinsic call!");
3321 
3322   FastMathFlags FMF;
3323   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3324     FMF = FPMO->getFastMathFlags();
3325 
3326   SmallVector<Value *, 4> Operands(CI->arg_operands());
3327   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF,
3328                                    TargetTransformInfo::TCK_RecipThroughput,
3329                                    CI);
3330 }
3331 
3332 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3333   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3334   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3335   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3336 }
3337 
3338 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3339   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3340   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3341   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3342 }
3343 
3344 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3345   // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
3347   // later and will remove any ext/trunc pairs.
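  // For example, an i32 add known to need only 8 bits is rewritten as truncs
  // of its operands to <VF x i8>, an i8 add, and a zext of the result back
  // to <VF x i32>.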
3348   SmallPtrSet<Value *, 4> Erased;
3349   for (const auto &KV : Cost->getMinimalBitwidths()) {
3350     // If the value wasn't vectorized, we must maintain the original scalar
3351     // type. The absence of the value from VectorLoopValueMap indicates that it
3352     // wasn't vectorized.
3353     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3354       continue;
3355     for (unsigned Part = 0; Part < UF; ++Part) {
3356       Value *I = getOrCreateVectorValue(KV.first, Part);
3357       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3358           !isa<Instruction>(I))
3359         continue;
3360       Type *OriginalTy = I->getType();
3361       Type *ScalarTruncatedTy =
3362           IntegerType::get(OriginalTy->getContext(), KV.second);
3363       Type *TruncatedTy = VectorType::get(
3364           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements());
3365       if (TruncatedTy == OriginalTy)
3366         continue;
3367 
3368       IRBuilder<> B(cast<Instruction>(I));
3369       auto ShrinkOperand = [&](Value *V) -> Value * {
3370         if (auto *ZI = dyn_cast<ZExtInst>(V))
3371           if (ZI->getSrcTy() == TruncatedTy)
3372             return ZI->getOperand(0);
3373         return B.CreateZExtOrTrunc(V, TruncatedTy);
3374       };
3375 
3376       // The actual instruction modification depends on the instruction type,
3377       // unfortunately.
3378       Value *NewI = nullptr;
3379       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3380         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3381                              ShrinkOperand(BO->getOperand(1)));
3382 
3383         // Any wrapping introduced by shrinking this operation shouldn't be
3384         // considered undefined behavior. So, we can't unconditionally copy
3385         // arithmetic wrapping flags to NewI.
3386         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3387       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3388         NewI =
3389             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3390                          ShrinkOperand(CI->getOperand(1)));
3391       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3392         NewI = B.CreateSelect(SI->getCondition(),
3393                               ShrinkOperand(SI->getTrueValue()),
3394                               ShrinkOperand(SI->getFalseValue()));
3395       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3396         switch (CI->getOpcode()) {
3397         default:
3398           llvm_unreachable("Unhandled cast!");
3399         case Instruction::Trunc:
3400           NewI = ShrinkOperand(CI->getOperand(0));
3401           break;
3402         case Instruction::SExt:
3403           NewI = B.CreateSExtOrTrunc(
3404               CI->getOperand(0),
3405               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3406           break;
3407         case Instruction::ZExt:
3408           NewI = B.CreateZExtOrTrunc(
3409               CI->getOperand(0),
3410               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3411           break;
3412         }
3413       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3414         auto Elements0 =
3415             cast<VectorType>(SI->getOperand(0)->getType())->getNumElements();
3416         auto *O0 = B.CreateZExtOrTrunc(
3417             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3418         auto Elements1 =
3419             cast<VectorType>(SI->getOperand(1)->getType())->getNumElements();
3420         auto *O1 = B.CreateZExtOrTrunc(
3421             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3422 
3423         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3424       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3425         // Don't do anything with the operands, just extend the result.
3426         continue;
3427       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3428         auto Elements =
3429             cast<VectorType>(IE->getOperand(0)->getType())->getNumElements();
3430         auto *O0 = B.CreateZExtOrTrunc(
3431             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3432         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3433         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3434       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3435         auto Elements =
3436             cast<VectorType>(EE->getOperand(0)->getType())->getNumElements();
3437         auto *O0 = B.CreateZExtOrTrunc(
3438             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3439         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3440       } else {
3441         // If we don't know what to do, be conservative and don't do anything.
3442         continue;
3443       }
3444 
3445       // Lastly, extend the result.
3446       NewI->takeName(cast<Instruction>(I));
3447       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3448       I->replaceAllUsesWith(Res);
3449       cast<Instruction>(I)->eraseFromParent();
3450       Erased.insert(I);
3451       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3452     }
3453   }
3454 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3456   for (const auto &KV : Cost->getMinimalBitwidths()) {
3457     // If the value wasn't vectorized, we must maintain the original scalar
3458     // type. The absence of the value from VectorLoopValueMap indicates that it
3459     // wasn't vectorized.
3460     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3461       continue;
3462     for (unsigned Part = 0; Part < UF; ++Part) {
3463       Value *I = getOrCreateVectorValue(KV.first, Part);
3464       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3465       if (Inst && Inst->use_empty()) {
3466         Value *NewI = Inst->getOperand(0);
3467         Inst->eraseFromParent();
3468         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3469       }
3470     }
3471   }
3472 }
3473 
3474 void InnerLoopVectorizer::fixVectorizedLoop() {
3475   // Insert truncates and extends for any truncated instructions as hints to
3476   // InstCombine.
3477   if (VF > 1)
3478     truncateToMinimalBitwidths();
3479 
3480   // Fix widened non-induction PHIs by setting up the PHI operands.
3481   if (OrigPHIsToFix.size()) {
3482     assert(EnableVPlanNativePath &&
3483            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3484     fixNonInductionPHIs();
3485   }
3486 
3487   // At this point every instruction in the original loop is widened to a
3488   // vector form. Now we need to fix the recurrences in the loop. These PHI
3489   // nodes are currently empty because we did not want to introduce cycles.
3490   // This is the second stage of vectorizing recurrences.
3491   fixCrossIterationPHIs();
3492 
3493   // Forget the original basic block.
3494   PSE.getSE()->forgetLoop(OrigLoop);
3495 
3496   // Fix-up external users of the induction variables.
3497   for (auto &Entry : Legal->getInductionVars())
3498     fixupIVUsers(Entry.first, Entry.second,
3499                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3500                  IVEndValues[Entry.first], LoopMiddleBlock);
3501 
3502   fixLCSSAPHIs();
3503   for (Instruction *PI : PredicatedInstructions)
3504     sinkScalarOperands(&*PI);
3505 
3506   // Remove redundant induction instructions.
3507   cse(LoopVectorBody);
3508 
3509   // Set/update profile weights for the vector and remainder loops as original
3510   // loop iterations are now distributed among them. Note that original loop
3511   // represented by LoopScalarBody becomes remainder loop after vectorization.
3512   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
3518   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3519                                LI->getLoopFor(LoopVectorBody),
3520                                LI->getLoopFor(LoopScalarBody), VF * UF);
3521 }
3522 
3523 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3524   // In order to support recurrences we need to be able to vectorize Phi nodes.
3525   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3526   // stage #2: We now need to fix the recurrences by adding incoming edges to
3527   // the currently empty PHI nodes. At this point every instruction in the
3528   // original loop is widened to a vector form so we can use them to construct
3529   // the incoming edges.
3530   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3531     // Handle first-order recurrences and reductions that need to be fixed.
3532     if (Legal->isFirstOrderRecurrence(&Phi))
3533       fixFirstOrderRecurrence(&Phi);
3534     else if (Legal->isReductionVariable(&Phi))
3535       fixReduction(&Phi);
3536   }
3537 }
3538 
3539 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3540   // This is the second phase of vectorizing first-order recurrences. An
3541   // overview of the transformation is described below. Suppose we have the
3542   // following loop.
3543   //
3544   //   for (int i = 0; i < n; ++i)
3545   //     b[i] = a[i] - a[i - 1];
3546   //
3547   // There is a first-order recurrence on "a". For this loop, the shorthand
3548   // scalar IR looks like:
3549   //
3550   //   scalar.ph:
3551   //     s_init = a[-1]
3552   //     br scalar.body
3553   //
3554   //   scalar.body:
3555   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3556   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3557   //     s2 = a[i]
3558   //     b[i] = s2 - s1
3559   //     br cond, scalar.body, ...
3560   //
  // In this example, s1 is a recurrence because its value depends on the
3562   // previous iteration. In the first phase of vectorization, we created a
3563   // temporary value for s1. We now complete the vectorization and produce the
3564   // shorthand vector IR shown below (for VF = 4, UF = 1).
3565   //
3566   //   vector.ph:
3567   //     v_init = vector(..., ..., ..., a[-1])
3568   //     br vector.body
3569   //
3570   //   vector.body
3571   //     i = phi [0, vector.ph], [i+4, vector.body]
3572   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3573   //     v2 = a[i, i+1, i+2, i+3];
3574   //     v3 = vector(v1(3), v2(0, 1, 2))
3575   //     b[i, i+1, i+2, i+3] = v2 - v3
3576   //     br cond, vector.body, middle.block
3577   //
3578   //   middle.block:
3579   //     x = v2(3)
3580   //     br scalar.ph
3581   //
3582   //   scalar.ph:
3583   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3584   //     br scalar.body
3585   //
3586   // After execution completes the vector loop, we extract the next value of
3587   // the recurrence (x) to use as the initial value in the scalar loop.
3588 
3589   // Get the original loop preheader and single loop latch.
3590   auto *Preheader = OrigLoop->getLoopPreheader();
3591   auto *Latch = OrigLoop->getLoopLatch();
3592 
3593   // Get the initial and previous values of the scalar recurrence.
3594   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3595   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3596 
3597   // Create a vector from the initial value.
3598   auto *VectorInit = ScalarInit;
3599   if (VF > 1) {
3600     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3601     VectorInit = Builder.CreateInsertElement(
3602         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3603         Builder.getInt32(VF - 1), "vector.recur.init");
3604   }
3605 
3606   // We constructed a temporary phi node in the first phase of vectorization.
3607   // This phi node will eventually be deleted.
3608   Builder.SetInsertPoint(
3609       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3610 
3611   // Create a phi node for the new recurrence. The current value will either be
3612   // the initial value inserted into a vector or loop-varying vector value.
3613   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3614   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3615 
3616   // Get the vectorized previous value of the last part UF - 1. It appears last
3617   // among all unrolled iterations, due to the order of their construction.
3618   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3619 
3620   // Find and set the insertion point after the previous value if it is an
3621   // instruction.
3622   BasicBlock::iterator InsertPt;
3623   // Note that the previous value may have been constant-folded so it is not
3624   // guaranteed to be an instruction in the vector loop.
3625   // FIXME: Loop invariant values do not form recurrences. We should deal with
3626   //        them earlier.
3627   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3628     InsertPt = LoopVectorBody->getFirstInsertionPt();
3629   else {
3630     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3631     if (isa<PHINode>(PreviousLastPart))
3632       // If the previous value is a phi node, we should insert after all the phi
3633       // nodes in the block containing the PHI to avoid breaking basic block
      // verification. Note that the basic block may be different from
      // LoopVectorBody, in case we predicate the loop.
3636       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3637     else
3638       InsertPt = ++PreviousInst->getIterator();
3639   }
3640   Builder.SetInsertPoint(&*InsertPt);
3641 
3642   // We will construct a vector for the recurrence by combining the values for
3643   // the current and previous iterations. This is the required shuffle mask.
3644   SmallVector<int, 8> ShuffleMask(VF);
3645   ShuffleMask[0] = VF - 1;
3646   for (unsigned I = 1; I < VF; ++I)
3647     ShuffleMask[I] = I + VF - 1;
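  // For illustration, with VF = 4 the mask is <3, 4, 5, 6>: lane 3 of the
  // first vector (the previous iteration's last element) followed by lanes 0,
  // 1 and 2 of the second vector, matching v3 = vector(v1(3), v2(0, 1, 2)) in
  // the example above.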
3648 
3649   // The vector from which to take the initial value for the current iteration
3650   // (actual or unrolled). Initially, this is the vector phi node.
3651   Value *Incoming = VecPhi;
3652 
3653   // Shuffle the current and previous vector and update the vector parts.
3654   for (unsigned Part = 0; Part < UF; ++Part) {
3655     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3656     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3657     auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3658                                                          ShuffleMask)
3659                            : Incoming;
3660     PhiPart->replaceAllUsesWith(Shuffle);
3661     cast<Instruction>(PhiPart)->eraseFromParent();
3662     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3663     Incoming = PreviousPart;
3664   }
3665 
3666   // Fix the latch value of the new recurrence in the vector loop.
3667   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3668 
3669   // Extract the last vector element in the middle block. This will be the
3670   // initial value for the recurrence when jumping to the scalar loop.
3671   auto *ExtractForScalar = Incoming;
3672   if (VF > 1) {
3673     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3674     ExtractForScalar = Builder.CreateExtractElement(
3675         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3676   }
  // Extract the second-to-last element in the middle block if the phi is
  // used outside the loop. We need the value of the phi itself, not the last
  // element (which is the phi update from the current iteration). This will
  // be the value used when jumping from LoopMiddleBlock to the exit block,
  // i.e., when the scalar loop is not run at all.
3682   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3683   if (VF > 1)
3684     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3685         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`. This is analogous to the vectorized case above: extracting
  // the second-to-last element when VF > 1.
3690   else if (UF > 1)
3691     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
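  // For example, with VF = 4 the lanes of Incoming hold the final four values
  // of the recurrence; the scalar loop resumes from the last lane, whereas an
  // LCSSA user of the phi itself needs the second-to-last lane, since the phi
  // trails the recurrence by one iteration.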
3692 
3693   // Fix the initial value of the original recurrence in the scalar loop.
3694   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3695   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3696   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3697     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3698     Start->addIncoming(Incoming, BB);
3699   }
3700 
3701   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3702   Phi->setName("scalar.recur");
3703 
3704   // Finally, fix users of the recurrence outside the loop. The users will need
3705   // either the last value of the scalar recurrence or the last value of the
3706   // vector recurrence we extracted in the middle block. Since the loop is in
3707   // LCSSA form, we just need to find all the phi nodes for the original scalar
3708   // recurrence in the exit block, and then add an edge for the middle block.
3709   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3710     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3711       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3712     }
3713   }
3714 }
3715 
3716 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3717   Constant *Zero = Builder.getInt32(0);
3718 
  // Get its reduction variable descriptor.
3720   assert(Legal->isReductionVariable(Phi) &&
3721          "Unable to find the reduction variable");
3722   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3723 
3724   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3725   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3726   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3727   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3728     RdxDesc.getMinMaxRecurrenceKind();
3729   setDebugLocFromInst(Builder, ReductionStartValue);
3730 
3731   // We need to generate a reduction vector from the incoming scalar.
3732   // To do so, we need to generate the 'identity' vector and override
3733   // one of the elements with the incoming scalar reduction. We need
3734   // to do it in the vector-loop preheader.
3735   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3736 
3737   // This is the vector-clone of the value that leaves the loop.
3738   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3739 
  // Find the reduction identity value: zero for addition, or, and xor; one
  // for multiplication; -1 for and.
3742   Value *Identity;
3743   Value *VectorStart;
3744   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3745       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
3747     if (VF == 1) {
3748       VectorStart = Identity = ReductionStartValue;
3749     } else {
3750       VectorStart = Identity =
3751         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3752     }
3753   } else {
3754     // Handle other reduction kinds:
3755     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3756         RK, VecTy->getScalarType());
3757     if (VF == 1) {
3758       Identity = Iden;
3759       // This vector is the Identity vector where the first element is the
3760       // incoming scalar reduction.
3761       VectorStart = ReductionStartValue;
3762     } else {
3763       Identity = ConstantVector::getSplat({VF, false}, Iden);
3764 
3765       // This vector is the Identity vector where the first element is the
3766       // incoming scalar reduction.
3767       VectorStart =
3768         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3769     }
3770   }
3771 
3772   // Wrap flags are in general invalid after vectorization, clear them.
3773   clearReductionWrapFlags(RdxDesc);
3774 
3775   // Fix the vector-loop phi.
3776 
3777   // Reductions do not have to start at zero. They can start with
3778   // any loop invariant values.
3779   BasicBlock *Latch = OrigLoop->getLoopLatch();
3780   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3781 
3782   for (unsigned Part = 0; Part < UF; ++Part) {
3783     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3784     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3785     // Make sure to add the reduction start value only to the
3786     // first unroll part.
3787     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3788     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3789     cast<PHINode>(VecRdxPhi)
3790       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3791   }
3792 
  // Move the insertion point to right between the PHIs and the values we are
  // going to write, so that we can emit both PHINodes and the extractelement
  // instructions in the middle block.
3797   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3798 
3799   setDebugLocFromInst(Builder, LoopExitInst);
3800 
  // If the tail is folded by masking, the vector value leaving the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, rather than the LoopExitInst itself.
3804   if (Cost->foldTailByMasking()) {
3805     for (unsigned Part = 0; Part < UF; ++Part) {
3806       Value *VecLoopExitInst =
3807           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3808       Value *Sel = nullptr;
3809       for (User *U : VecLoopExitInst->users()) {
3810         if (isa<SelectInst>(U)) {
3811           assert(!Sel && "Reduction exit feeding two selects");
3812           Sel = U;
3813         } else
3814           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3815       }
3816       assert(Sel && "Reduction exit feeds no select");
3817       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3818     }
3819   }
3820 
3821   // If the vector reduction can be performed in a smaller type, we truncate
3822   // then extend the loop exit value to enable InstCombine to evaluate the
3823   // entire expression in the smaller type.
3824   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3825     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3826     Builder.SetInsertPoint(
3827         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3828     VectorParts RdxParts(UF);
3829     for (unsigned Part = 0; Part < UF; ++Part) {
3830       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3831       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3832       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3833                                         : Builder.CreateZExt(Trunc, VecTy);
3834       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3835            UI != RdxParts[Part]->user_end();)
3836         if (*UI != Trunc) {
3837           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3838           RdxParts[Part] = Extnd;
3839         } else {
3840           ++UI;
3841         }
3842     }
3843     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3844     for (unsigned Part = 0; Part < UF; ++Part) {
3845       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3846       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3847     }
3848   }
3849 
3850   // Reduce all of the unrolled parts into a single vector.
3851   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3852   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3853 
3854   // The middle block terminator has already been assigned a DebugLoc here (the
3855   // OrigLoop's single latch terminator). We want the whole middle block to
3856   // appear to execute on this line because: (a) it is all compiler generated,
3857   // (b) these instructions are always executed after evaluating the latch
3858   // conditional branch, and (c) other passes may add new predecessors which
3859   // terminate on this line. This is the easiest way to ensure we don't
3860   // accidentally cause an extra step back into the loop while debugging.
3861   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3862   for (unsigned Part = 1; Part < UF; ++Part) {
3863     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3864     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3865       // Floating point operations had to be 'fast' to enable the reduction.
3866       ReducedPartRdx = addFastMathFlag(
3867           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3868                               ReducedPartRdx, "bin.rdx"),
3869           RdxDesc.getFastMathFlags());
3870     else
3871       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3872                                       RdxPart);
3873   }
3874 
3875   if (VF > 1) {
3876     bool NoNaN = Legal->hasFunNoNaNAttr();
3877     ReducedPartRdx =
3878         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3879     // If the reduction can be performed in a smaller type, we need to extend
3880     // the reduction to the wider type before we branch to the original loop.
3881     if (Phi->getType() != RdxDesc.getRecurrenceType())
3882       ReducedPartRdx =
3883         RdxDesc.isSigned()
3884         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3885         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3886   }
3887 
3888   // Create a phi node that merges control-flow from the backedge-taken check
3889   // block and the middle block.
3890   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3891                                         LoopScalarPreHeader->getTerminator());
3892   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3893     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3894   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3895 
3896   // Now, we need to fix the users of the reduction variable
3897   // inside and outside of the scalar remainder loop.
3898   // We know that the loop is in LCSSA form. We need to update the
3899   // PHI nodes in the exit blocks.
3900   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3901     // All PHINodes need to have a single entry edge, or two if
3902     // we already fixed them.
3903     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3904 
3905     // We found a reduction value exit-PHI. Update it with the
3906     // incoming bypass edge.
3907     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3908       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3909   } // end of the LCSSA phi scan.
3910 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
3913   int IncomingEdgeBlockIdx =
3914     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3915   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3916   // Pick the other block.
3917   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3918   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3919   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3920 }
3921 
3922 void InnerLoopVectorizer::clearReductionWrapFlags(
3923     RecurrenceDescriptor &RdxDesc) {
3924   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3925   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3926       RK != RecurrenceDescriptor::RK_IntegerMult)
3927     return;
3928 
3929   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3930   assert(LoopExitInstr && "null loop exit instruction");
3931   SmallVector<Instruction *, 8> Worklist;
3932   SmallPtrSet<Instruction *, 8> Visited;
3933   Worklist.push_back(LoopExitInstr);
3934   Visited.insert(LoopExitInstr);
3935 
3936   while (!Worklist.empty()) {
3937     Instruction *Cur = Worklist.pop_back_val();
3938     if (isa<OverflowingBinaryOperator>(Cur))
3939       for (unsigned Part = 0; Part < UF; ++Part) {
3940         Value *V = getOrCreateVectorValue(Cur, Part);
3941         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3942       }
3943 
3944     for (User *U : Cur->users()) {
3945       Instruction *UI = cast<Instruction>(U);
3946       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3947           Visited.insert(UI).second)
3948         Worklist.push_back(UI);
3949     }
3950   }
3951 }
3952 
3953 void InnerLoopVectorizer::fixLCSSAPHIs() {
3954   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3955     if (LCSSAPhi.getNumIncomingValues() == 1) {
3956       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
      // Non-instruction incoming values have only one value; use lane zero.
3958       unsigned LastLane = 0;
3959       if (isa<Instruction>(IncomingValue))
3960           LastLane = Cost->isUniformAfterVectorization(
3961                          cast<Instruction>(IncomingValue), VF)
3962                          ? 0
3963                          : VF - 1;
3964       // Can be a loop invariant incoming value or the last scalar value to be
3965       // extracted from the vectorized loop.
3966       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3967       Value *lastIncomingValue =
3968           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3969       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3970     }
3971   }
3972 }
3973 
3974 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3975   // The basic block and loop containing the predicated instruction.
3976   auto *PredBB = PredInst->getParent();
3977   auto *VectorLoop = LI->getLoopFor(PredBB);
3978 
3979   // Initialize a worklist with the operands of the predicated instruction.
3980   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3981 
3982   // Holds instructions that we need to analyze again. An instruction may be
3983   // reanalyzed if we don't yet know if we can sink it or not.
3984   SmallVector<Instruction *, 8> InstsToReanalyze;
3985 
3986   // Returns true if a given use occurs in the predicated block. Phi nodes use
3987   // their operands in their corresponding predecessor blocks.
3988   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3989     auto *I = cast<Instruction>(U.getUser());
3990     BasicBlock *BB = I->getParent();
3991     if (auto *Phi = dyn_cast<PHINode>(I))
3992       BB = Phi->getIncomingBlock(
3993           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3994     return BB == PredBB;
3995   };
3996 
3997   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
3999   // operands are then added to the worklist. The algorithm ends after one pass
4000   // through the worklist doesn't sink a single instruction.
4001   bool Changed;
4002   do {
4003     // Add the instructions that need to be reanalyzed to the worklist, and
4004     // reset the changed indicator.
4005     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4006     InstsToReanalyze.clear();
4007     Changed = false;
4008 
4009     while (!Worklist.empty()) {
4010       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4011 
4012       // We can't sink an instruction if it is a phi node, is already in the
4013       // predicated block, is not in the loop, or may have side effects.
4014       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4015           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4016         continue;
4017 
4018       // It's legal to sink the instruction if all its uses occur in the
4019       // predicated block. Otherwise, there's nothing to do yet, and we may
4020       // need to reanalyze the instruction.
4021       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4022         InstsToReanalyze.push_back(I);
4023         continue;
4024       }
4025 
4026       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4028       I->moveBefore(&*PredBB->getFirstInsertionPt());
4029       Worklist.insert(I->op_begin(), I->op_end());
4030 
4031       // The sinking may have enabled other instructions to be sunk, so we will
4032       // need to iterate.
4033       Changed = true;
4034     }
4035   } while (Changed);
4036 }
4037 
4038 void InnerLoopVectorizer::fixNonInductionPHIs() {
4039   for (PHINode *OrigPhi : OrigPHIsToFix) {
4040     PHINode *NewPhi =
4041         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4042     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4043 
4044     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4045         predecessors(OrigPhi->getParent()));
4046     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4047         predecessors(NewPhi->getParent()));
4048     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4049            "Scalar and Vector BB should have the same number of predecessors");
4050 
4051     // The insertion point in Builder may be invalidated by the time we get
4052     // here. Force the Builder insertion point to something valid so that we do
4053     // not run into issues during insertion point restore in
4054     // getOrCreateVectorValue calls below.
4055     Builder.SetInsertPoint(NewPhi);
4056 
4057     // The predecessor order is preserved and we can rely on mapping between
4058     // scalar and vector block predecessors.
4059     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4060       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4061 
4062       // When looking up the new scalar/vector values to fix up, use incoming
4063       // values from original phi.
4064       Value *ScIncV =
4065           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4066 
      // A scalar incoming value may need a broadcast.
4068       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4069       NewPhi->addIncoming(NewIncV, NewPredBB);
4070     }
4071   }
4072 }
4073 
4074 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF,
4075                                    unsigned VF, bool IsPtrLoopInvariant,
4076                                    SmallBitVector &IsIndexLoopInvariant) {
4077   // Construct a vector GEP by widening the operands of the scalar GEP as
4078   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4079   // results in a vector of pointers when at least one operand of the GEP
4080   // is vector-typed. Thus, to keep the representation compact, we only use
4081   // vector-typed operands for loop-varying values.
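  //
  // As an illustrative sketch (VF = 4, placeholder names), a GEP with a
  // loop-varying index
  //   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
  // is widened into a vector-of-pointers GEP
  //   %gep.vec = getelementptr inbounds i32, i32* %base, <4 x i64> %iv.vec
  // where only the loop-varying index is given a vector operand.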
4082 
4083   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4084     // If we are vectorizing, but the GEP has only loop-invariant operands,
4085     // the GEP we build (by only using vector-typed operands for
4086     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4087     // produce a vector of pointers, we need to either arbitrarily pick an
4088     // operand to broadcast, or broadcast a clone of the original GEP.
4089     // Here, we broadcast a clone of the original.
4090     //
4091     // TODO: If at some point we decide to scalarize instructions having
4092     //       loop-invariant operands, this special case will no longer be
4093     //       required. We would add the scalarization decision to
4094     //       collectLoopScalars() and teach getVectorValue() to broadcast
4095     //       the lane-zero scalar value.
4096     auto *Clone = Builder.Insert(GEP->clone());
4097     for (unsigned Part = 0; Part < UF; ++Part) {
4098       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4099       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4100       addMetadata(EntryPart, GEP);
4101     }
4102   } else {
4103     // If the GEP has at least one loop-varying operand, we are sure to
4104     // produce a vector of pointers. But if we are only unrolling, we want
4105     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4106     // produce with the code below will be scalar (if VF == 1) or vector
4107     // (otherwise). Note that for the unroll-only case, we still maintain
4108     // values in the vector mapping with initVector, as we do for other
4109     // instructions.
4110     for (unsigned Part = 0; Part < UF; ++Part) {
4111       // The pointer operand of the new GEP. If it's loop-invariant, we
4112       // won't broadcast it.
4113       auto *Ptr = IsPtrLoopInvariant
4114                       ? GEP->getPointerOperand()
4115                       : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4116 
4117       // Collect all the indices for the new GEP. If any index is
4118       // loop-invariant, we won't broadcast it.
4119       SmallVector<Value *, 4> Indices;
4120       for (auto Index : enumerate(GEP->indices())) {
4121         Value *User = Index.value().get();
4122         if (IsIndexLoopInvariant[Index.index()])
4123           Indices.push_back(User);
4124         else
4125           Indices.push_back(getOrCreateVectorValue(User, Part));
4126       }
4127 
4128       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4129       // but it should be a vector, otherwise.
4130       auto *NewGEP =
4131           GEP->isInBounds()
4132               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4133                                           Indices)
4134               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4135       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4136              "NewGEP is not a pointer vector");
4137       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4138       addMetadata(NewGEP, GEP);
4139     }
4140   }
4141 }
4142 
4143 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4144                                               unsigned VF) {
4145   PHINode *P = cast<PHINode>(PN);
4146   if (EnableVPlanNativePath) {
4147     // Currently we enter here in the VPlan-native path for non-induction
4148     // PHIs where all control flow is uniform. We simply widen these PHIs.
4149     // Create a vector phi with no operands - the vector phi operands will be
4150     // set at the end of vector code generation.
4151     Type *VecTy =
4152         (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4153     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4154     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4155     OrigPHIsToFix.push_back(P);
4156 
4157     return;
4158   }
4159 
4160   assert(PN->getParent() == OrigLoop->getHeader() &&
4161          "Non-header phis should have been handled elsewhere");
4162 
4163   // In order to support recurrences we need to be able to vectorize Phi nodes.
4164   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4165   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4166   // this value when we vectorize all of the instructions that use the PHI.
4167   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4168     for (unsigned Part = 0; Part < UF; ++Part) {
4169       // This is phase one of vectorizing PHIs.
4170       Type *VecTy =
4171           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4172       Value *EntryPart = PHINode::Create(
4173           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4174       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4175     }
4176     return;
4177   }
4178 
4179   setDebugLocFromInst(Builder, P);
4180 
4181   // This PHINode must be an induction variable.
4182   // Make sure that we know about it.
4183   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4184 
4185   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4186   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4187 
4188   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4189   // which can be found from the original scalar operations.
4190   switch (II.getKind()) {
4191   case InductionDescriptor::IK_NoInduction:
4192     llvm_unreachable("Unknown induction");
4193   case InductionDescriptor::IK_IntInduction:
4194   case InductionDescriptor::IK_FpInduction:
4195     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4196   case InductionDescriptor::IK_PtrInduction: {
4197     // Handle the pointer induction variable case.
4198     assert(P->getType()->isPointerTy() && "Unexpected type.");
4199     // This is the normalized GEP that starts counting at zero.
4200     Value *PtrInd = Induction;
4201     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4202     // Determine the number of scalars we need to generate for each unroll
4203     // iteration. If the instruction is uniform, we only need to generate the
4204     // first lane. Otherwise, we generate all VF values.
4205     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4206     // These are the scalar results. Notice that we don't generate vector GEPs
4207     // because scalar GEPs result in better code.
4208     for (unsigned Part = 0; Part < UF; ++Part) {
4209       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4210         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4211         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4212         Value *SclrGep =
4213             emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4214         SclrGep->setName("next.gep");
4215         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4216       }
4217     }
4218     return;
4219   }
4220   }
4221 }
4222 
4223 /// A helper function for checking whether an integer division-related
4224 /// instruction may divide by zero (in which case it must be predicated if
4225 /// executed conditionally in the scalar code).
4226 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing the
/// division, but can do so without predication.
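/// For example (illustrative IR), 'udiv i32 %x, %y' with a non-constant %y
/// may divide by zero and must be predicated, whereas 'udiv i32 %x, 7'
/// cannot, so it can be scalarized without predication.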
4230 static bool mayDivideByZero(Instruction &I) {
4231   assert((I.getOpcode() == Instruction::UDiv ||
4232           I.getOpcode() == Instruction::SDiv ||
4233           I.getOpcode() == Instruction::URem ||
4234           I.getOpcode() == Instruction::SRem) &&
4235          "Unexpected instruction");
4236   Value *Divisor = I.getOperand(1);
4237   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4238   return !CInt || CInt->isZero();
4239 }
4240 
4241 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
4242                                            VPTransformState &State) {
4243   switch (I.getOpcode()) {
4244   case Instruction::Call:
4245   case Instruction::Br:
4246   case Instruction::PHI:
4247   case Instruction::GetElementPtr:
4248   case Instruction::Select:
4249     llvm_unreachable("This instruction is handled by a different recipe.");
4250   case Instruction::UDiv:
4251   case Instruction::SDiv:
4252   case Instruction::SRem:
4253   case Instruction::URem:
4254   case Instruction::Add:
4255   case Instruction::FAdd:
4256   case Instruction::Sub:
4257   case Instruction::FSub:
4258   case Instruction::FNeg:
4259   case Instruction::Mul:
4260   case Instruction::FMul:
4261   case Instruction::FDiv:
4262   case Instruction::FRem:
4263   case Instruction::Shl:
4264   case Instruction::LShr:
4265   case Instruction::AShr:
4266   case Instruction::And:
4267   case Instruction::Or:
4268   case Instruction::Xor: {
4269     // Just widen unops and binops.
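    // For example (VF = 4, placeholder names), a scalar 'add i32 %a, %b'
    // becomes
    //   %v = add <4 x i32> %a.vec, %b.vec
    // with the original instruction's IR flags (nsw/nuw, fast-math) copied
    // over below.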
4270     setDebugLocFromInst(Builder, &I);
4271 
4272     for (unsigned Part = 0; Part < UF; ++Part) {
4273       SmallVector<Value *, 2> Ops;
4274       for (VPValue *VPOp : User.operands())
4275         Ops.push_back(State.get(VPOp, Part));
4276 
4277       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4278 
4279       if (auto *VecOp = dyn_cast<Instruction>(V))
4280         VecOp->copyIRFlags(&I);
4281 
4282       // Use this vector value for all users of the original instruction.
4283       VectorLoopValueMap.setVectorValue(&I, Part, V);
4284       addMetadata(V, &I);
4285     }
4286 
4287     break;
4288   }
4289   case Instruction::ICmp:
4290   case Instruction::FCmp: {
4291     // Widen compares. Generate vector compares.
4292     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4293     auto *Cmp = cast<CmpInst>(&I);
4294     setDebugLocFromInst(Builder, Cmp);
4295     for (unsigned Part = 0; Part < UF; ++Part) {
4296       Value *A = State.get(User.getOperand(0), Part);
4297       Value *B = State.get(User.getOperand(1), Part);
4298       Value *C = nullptr;
4299       if (FCmp) {
4300         // Propagate fast math flags.
4301         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4302         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4303         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4304       } else {
4305         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4306       }
4307       VectorLoopValueMap.setVectorValue(&I, Part, C);
4308       addMetadata(C, &I);
4309     }
4310 
4311     break;
4312   }
4313 
4314   case Instruction::ZExt:
4315   case Instruction::SExt:
4316   case Instruction::FPToUI:
4317   case Instruction::FPToSI:
4318   case Instruction::FPExt:
4319   case Instruction::PtrToInt:
4320   case Instruction::IntToPtr:
4321   case Instruction::SIToFP:
4322   case Instruction::UIToFP:
4323   case Instruction::Trunc:
4324   case Instruction::FPTrunc:
4325   case Instruction::BitCast: {
4326     auto *CI = cast<CastInst>(&I);
4327     setDebugLocFromInst(Builder, CI);
4328 
    // Vectorize casts.
4330     Type *DestTy =
4331         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4332 
4333     for (unsigned Part = 0; Part < UF; ++Part) {
4334       Value *A = State.get(User.getOperand(0), Part);
4335       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4336       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4337       addMetadata(Cast, &I);
4338     }
4339     break;
4340   }
4341   default:
4342     // This instruction is not vectorized by simple widening.
4343     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4344     llvm_unreachable("Unhandled instruction!");
4345   } // end of switch.
4346 }
4347 
4348 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4349                                                VPTransformState &State) {
4350   assert(!isa<DbgInfoIntrinsic>(I) &&
4351          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4352   setDebugLocFromInst(Builder, &I);
4353 
4354   Module *M = I.getParent()->getParent()->getParent();
4355   auto *CI = cast<CallInst>(&I);
4356 
4357   SmallVector<Type *, 4> Tys;
4358   for (Value *ArgOperand : CI->arg_operands())
4359     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4360 
4361   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4362 
  // The flag indicates whether to use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e., whether the intrinsic call
  // is cheaper than the library call.
4366   bool NeedToScalarize = false;
4367   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4368   bool UseVectorIntrinsic =
4369       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4370   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4371          "Instruction should be scalarized elsewhere.");
4372 
4373   for (unsigned Part = 0; Part < UF; ++Part) {
4374     SmallVector<Value *, 4> Args;
4375     for (auto &I : enumerate(ArgOperands.operands())) {
4376       // Some intrinsics have a scalar argument - don't replace it with a
4377       // vector.
4378       Value *Arg;
4379       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4380         Arg = State.get(I.value(), Part);
4381       else
4382         Arg = State.get(I.value(), {0, 0});
4383       Args.push_back(Arg);
4384     }
4385 
4386     Function *VectorF;
4387     if (UseVectorIntrinsic) {
4388       // Use vector version of the intrinsic.
4389       Type *TysForDecl[] = {CI->getType()};
4390       if (VF > 1)
4391         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4392       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4393       assert(VectorF && "Can't retrieve vector intrinsic.");
4394     } else {
4395       // Use vector version of the function call.
4396       const VFShape Shape =
4397           VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4398 #ifndef NDEBUG
4399       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4400              "Can't create vector function.");
4401 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    VectorLoopValueMap.setVectorValue(&I, Part, V);
    addMetadata(V, &I);
4413   }
4414 }
4415 
4416 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4417                                                  bool InvariantCond) {
4418   setDebugLocFromInst(Builder, &I);
4419 
  // The condition can be loop invariant but still defined inside the
4421   // loop. This means that we can't just use the original 'cond' value.
4422   // We have to take the 'vectorized' value and pick the first lane.
4423   // Instcombine will make this a no-op.
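  //
  // For illustration (VF = 4, placeholder names), an invariant condition
  // yields
  //   %sel = select i1 %cond, <4 x i32> %op0.vec, <4 x i32> %op1.vec
  // whereas a loop-varying condition uses the widened <4 x i1> mask instead.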
4424 
4425   auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4426 
4427   for (unsigned Part = 0; Part < UF; ++Part) {
4428     Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4429     Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4430     Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4431     Value *Sel =
4432         Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4433     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4434     addMetadata(Sel, &I);
4435   }
4436 }
4437 
4438 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4439   // We should not collect Scalars more than once per VF. Right now, this
4440   // function is called from collectUniformsAndScalars(), which already does
4441   // this check. Collecting Scalars for VF=1 does not make any sense.
4442   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4443          "This function should not be visited twice for the same VF");
4444 
4445   SmallSetVector<Instruction *, 8> Worklist;
4446 
4447   // These sets are used to seed the analysis with pointers used by memory
4448   // accesses that will remain scalar.
4449   SmallSetVector<Instruction *, 8> ScalarPtrs;
4450   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4451 
4452   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4453   // The pointer operands of loads and stores will be scalar as long as the
4454   // memory access is not a gather or scatter operation. The value operand of a
4455   // store will remain scalar if the store is scalarized.
4456   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4457     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4458     assert(WideningDecision != CM_Unknown &&
4459            "Widening decision should be ready at this moment");
4460     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4461       if (Ptr == Store->getValueOperand())
4462         return WideningDecision == CM_Scalarize;
4463     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4464            "Ptr is neither a value or pointer operand");
4465     return WideningDecision != CM_GatherScatter;
4466   };
4467 
4468   // A helper that returns true if the given value is a bitcast or
4469   // getelementptr instruction contained in the loop.
4470   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4471     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4472             isa<GetElementPtrInst>(V)) &&
4473            !TheLoop->isLoopInvariant(V);
4474   };
4475 
4476   // A helper that evaluates a memory access's use of a pointer. If the use
4477   // will be a scalar use, and the pointer is only used by memory accesses, we
4478   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4479   // PossibleNonScalarPtrs.
4480   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4481     // We only care about bitcast and getelementptr instructions contained in
4482     // the loop.
4483     if (!isLoopVaryingBitCastOrGEP(Ptr))
4484       return;
4485 
4486     // If the pointer has already been identified as scalar (e.g., if it was
4487     // also identified as uniform), there's nothing to do.
4488     auto *I = cast<Instruction>(Ptr);
4489     if (Worklist.count(I))
4490       return;
4491 
4492     // If the use of the pointer will be a scalar use, and all users of the
4493     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4494     // place the pointer in PossibleNonScalarPtrs.
4495     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4496           return isa<LoadInst>(U) || isa<StoreInst>(U);
4497         }))
4498       ScalarPtrs.insert(I);
4499     else
4500       PossibleNonScalarPtrs.insert(I);
4501   };
4502 
4503   // We seed the scalars analysis with three classes of instructions: (1)
4504   // instructions marked uniform-after-vectorization, (2) bitcast and
4505   // getelementptr instructions used by memory accesses requiring a scalar use,
4506   // and (3) pointer induction variables and their update instructions (we
4507   // currently only scalarize these).
4508   //
4509   // (1) Add to the worklist all instructions that have been identified as
4510   // uniform-after-vectorization.
4511   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4512 
4513   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4514   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4516   // scatter operation. The value operand of a store will remain scalar if the
4517   // store is scalarized.
4518   for (auto *BB : TheLoop->blocks())
4519     for (auto &I : *BB) {
4520       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4521         evaluatePtrUse(Load, Load->getPointerOperand());
4522       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4523         evaluatePtrUse(Store, Store->getPointerOperand());
4524         evaluatePtrUse(Store, Store->getValueOperand());
4525       }
4526     }
4527   for (auto *I : ScalarPtrs)
4528     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4529       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4530       Worklist.insert(I);
4531     }
4532 
4533   // (3) Add to the worklist all pointer induction variables and their update
4534   // instructions.
4535   //
4536   // TODO: Once we are able to vectorize pointer induction variables we should
4537   //       no longer insert them into the worklist here.
4538   auto *Latch = TheLoop->getLoopLatch();
4539   for (auto &Induction : Legal->getInductionVars()) {
4540     auto *Ind = Induction.first;
4541     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4542     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4543       continue;
4544     Worklist.insert(Ind);
4545     Worklist.insert(IndUpdate);
4546     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4547     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4548                       << "\n");
4549   }
4550 
4551   // Insert the forced scalars.
4552   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4553   // induction variable when the PHI user is scalarized.
4554   auto ForcedScalar = ForcedScalars.find(VF);
4555   if (ForcedScalar != ForcedScalars.end())
4556     for (auto *I : ForcedScalar->second)
4557       Worklist.insert(I);
4558 
4559   // Expand the worklist by looking through any bitcasts and getelementptr
4560   // instructions we've already identified as scalar. This is similar to the
4561   // expansion step in collectLoopUniforms(); however, here we're only
4562   // expanding to include additional bitcasts and getelementptr instructions.
4563   unsigned Idx = 0;
4564   while (Idx != Worklist.size()) {
4565     Instruction *Dst = Worklist[Idx++];
4566     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4567       continue;
4568     auto *Src = cast<Instruction>(Dst->getOperand(0));
4569     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4570           auto *J = cast<Instruction>(U);
4571           return !TheLoop->contains(J) || Worklist.count(J) ||
4572                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4573                   isScalarUse(J, Src));
4574         })) {
4575       Worklist.insert(Src);
4576       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4577     }
4578   }
4579 
4580   // An induction variable will remain scalar if all users of the induction
4581   // variable and induction variable update remain scalar.
4582   for (auto &Induction : Legal->getInductionVars()) {
4583     auto *Ind = Induction.first;
4584     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4585 
4586     // We already considered pointer induction variables, so there's no reason
4587     // to look at their users again.
4588     //
4589     // TODO: Once we are able to vectorize pointer induction variables we
4590     //       should no longer skip over them here.
4591     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4592       continue;
4593 
4594     // If tail-folding is applied, the primary induction variable will be used
4595     // to feed a vector compare.
4596     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4597       continue;
4598 
4599     // Determine if all users of the induction variable are scalar after
4600     // vectorization.
4601     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4602       auto *I = cast<Instruction>(U);
4603       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4604     });
4605     if (!ScalarInd)
4606       continue;
4607 
4608     // Determine if all users of the induction variable update instruction are
4609     // scalar after vectorization.
4610     auto ScalarIndUpdate =
4611         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4612           auto *I = cast<Instruction>(U);
4613           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4614         });
4615     if (!ScalarIndUpdate)
4616       continue;
4617 
4618     // The induction variable and its update instruction will remain scalar.
4619     Worklist.insert(Ind);
4620     Worklist.insert(IndUpdate);
4621     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4622     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4623                       << "\n");
4624   }
4625 
4626   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4627 }
4628 
4629 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4630   if (!blockNeedsPredication(I->getParent()))
4631     return false;
4632   switch(I->getOpcode()) {
4633   default:
4634     break;
4635   case Instruction::Load:
4636   case Instruction::Store: {
4637     if (!Legal->isMaskRequired(I))
4638       return false;
4639     auto *Ptr = getLoadStorePointerOperand(I);
4640     auto *Ty = getMemInstValueType(I);
4641     // We have already decided how to vectorize this instruction, get that
4642     // result.
4643     if (VF > 1) {
4644       InstWidening WideningDecision = getWideningDecision(I, VF);
4645       assert(WideningDecision != CM_Unknown &&
4646              "Widening decision should be ready at this moment");
4647       return WideningDecision == CM_Scalarize;
4648     }
4649     const Align Alignment = getLoadStoreAlignment(I);
4650     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4651                                 isLegalMaskedGather(Ty, Alignment))
4652                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4653                                 isLegalMaskedScatter(Ty, Alignment));
4654   }
4655   case Instruction::UDiv:
4656   case Instruction::SDiv:
4657   case Instruction::SRem:
4658   case Instruction::URem:
4659     return mayDivideByZero(*I);
4660   }
4661   return false;
4662 }
4663 
4664 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4665                                                                unsigned VF) {
4666   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4667   assert(getWideningDecision(I, VF) == CM_Unknown &&
4668          "Decision should not be set yet.");
4669   auto *Group = getInterleavedAccessGroup(I);
4670   assert(Group && "Must have a group.");
4671 
  // If the instruction's allocated size doesn't equal its type size, it
4673   // requires padding and will be scalarized.
4674   auto &DL = I->getModule()->getDataLayout();
4675   auto *ScalarTy = getMemInstValueType(I);
4676   if (hasIrregularType(ScalarTy, DL, VF))
4677     return false;
4678 
4679   // Check if masking is required.
4680   // A Group may need masking for one of two reasons: it resides in a block that
4681   // needs predication, or it was decided to use masking to deal with gaps.
4682   bool PredicatedAccessRequiresMasking =
4683       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4684   bool AccessWithGapsRequiresMasking =
4685       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4686   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4687     return true;
4688 
4689   // If masked interleaving is required, we expect that the user/target had
4690   // enabled it, because otherwise it either wouldn't have been created or
4691   // it should have been invalidated by the CostModel.
4692   assert(useMaskedInterleavedAccesses(TTI) &&
4693          "Masked interleave-groups for predicated accesses are not enabled.");
4694 
4695   auto *Ty = getMemInstValueType(I);
4696   const Align Alignment = getLoadStoreAlignment(I);
4697   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4698                           : TTI.isLegalMaskedStore(Ty, Alignment);
4699 }
4700 
4701 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4702                                                                unsigned VF) {
4703   // Get and ensure we have a valid memory instruction.
4704   LoadInst *LI = dyn_cast<LoadInst>(I);
4705   StoreInst *SI = dyn_cast<StoreInst>(I);
4706   assert((LI || SI) && "Invalid memory instruction");
4707 
4708   auto *Ptr = getLoadStorePointerOperand(I);
4709 
  // First of all, in order to be widened the pointer must be consecutive.
4711   if (!Legal->isConsecutivePtr(Ptr))
4712     return false;
4713 
4714   // If the instruction is a store located in a predicated block, it will be
4715   // scalarized.
4716   if (isScalarWithPredication(I))
4717     return false;
4718 
  // If the instruction's allocated size doesn't equal its type size, it
4720   // requires padding and will be scalarized.
4721   auto &DL = I->getModule()->getDataLayout();
4722   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4723   if (hasIrregularType(ScalarTy, DL, VF))
4724     return false;
4725 
4726   return true;
4727 }
4728 
4729 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4730   // We should not collect Uniforms more than once per VF. Right now,
4731   // this function is called from collectUniformsAndScalars(), which
4732   // already does this check. Collecting Uniforms for VF=1 does not make any
4733   // sense.
4734 
4735   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4736          "This function should not be visited twice for the same VF");
4737 
  // Initialize the entry for this VF up front: even if we find no uniform
  // values, we will not analyze again, and Uniforms.count(VF) will return 1.
4740   Uniforms[VF].clear();
4741 
4742   // We now know that the loop is vectorizable!
4743   // Collect instructions inside the loop that will remain uniform after
4744   // vectorization.
4745 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
4748   auto isOutOfScope = [&](Value *V) -> bool {
4749     Instruction *I = dyn_cast<Instruction>(V);
4750     return (!I || !TheLoop->contains(I));
4751   };
4752 
4753   SetVector<Instruction *> Worklist;
4754   BasicBlock *Latch = TheLoop->getLoopLatch();
4755 
4756   // Instructions that are scalar with predication must not be considered
4757   // uniform after vectorization, because that would create an erroneous
4758   // replicating region where only a single instance out of VF should be formed.
4759   // TODO: optimize such seldom cases if found important, see PR40816.
4760   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4761     if (isScalarWithPredication(I, VF)) {
4762       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4763                         << *I << "\n");
4764       return;
4765     }
4766     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4767     Worklist.insert(I);
4768   };
4769 
4770   // Start with the conditional branch. If the branch condition is an
4771   // instruction contained in the loop that is only used by the branch, it is
4772   // uniform.
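  // For example (illustrative IR), in
  //   %cmp = icmp ult i64 %iv.next, %n
  //   br i1 %cmp, label %loop.body, label %exit
  // %cmp is used only by the branch, so every lane would compute the same
  // value and %cmp can remain uniform (scalar).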
4773   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4774   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4775     addToWorklistIfAllowed(Cmp);
4776 
4777   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4778   // are pointers that are treated like consecutive pointers during
4779   // vectorization. The pointer operands of interleaved accesses are an
4780   // example.
4781   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4782 
4783   // Holds pointer operands of instructions that are possibly non-uniform.
4784   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4785 
4786   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4787     InstWidening WideningDecision = getWideningDecision(I, VF);
4788     assert(WideningDecision != CM_Unknown &&
4789            "Widening decision should be ready at this moment");
4790 
4791     return (WideningDecision == CM_Widen ||
4792             WideningDecision == CM_Widen_Reverse ||
4793             WideningDecision == CM_Interleave);
4794   };
4795   // Iterate over the instructions in the loop, and collect all
4796   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4797   // that a consecutive-like pointer operand will be scalarized, we collect it
4798   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4799   // getelementptr instruction can be used by both vectorized and scalarized
4800   // memory instructions. For example, if a loop loads and stores from the same
4801   // location, but the store is conditional, the store will be scalarized, and
4802   // the getelementptr won't remain uniform.
4803   for (auto *BB : TheLoop->blocks())
4804     for (auto &I : *BB) {
4805       // If there's no pointer operand, there's nothing to do.
4806       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4807       if (!Ptr)
4808         continue;
4809 
4810       // True if all users of Ptr are memory accesses that have Ptr as their
4811       // pointer operand.
4812       auto UsersAreMemAccesses =
4813           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4814             return getLoadStorePointerOperand(U) == Ptr;
4815           });
4816 
4817       // Ensure the memory instruction will not be scalarized or used by
4818       // gather/scatter, making its pointer operand non-uniform. If the pointer
4819       // operand is used by any instruction other than a memory access, we
4820       // conservatively assume the pointer operand may be non-uniform.
4821       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4822         PossibleNonUniformPtrs.insert(Ptr);
4823 
4824       // If the memory instruction will be vectorized and its pointer operand
4825       // is consecutive-like, or interleaving - the pointer operand should
4826       // remain uniform.
4827       else
4828         ConsecutiveLikePtrs.insert(Ptr);
4829     }
4830 
4831   // Add to the Worklist all consecutive and consecutive-like pointers that
4832   // aren't also identified as possibly non-uniform.
4833   for (auto *V : ConsecutiveLikePtrs)
4834     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
4835       addToWorklistIfAllowed(V);
4836 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
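  // For example (hypothetical IR), if a uniform %gep is already in Worklist
  // and its index %i = add i64 %iv, 1 is used only by %gep, then %i becomes
  // uniform and is appended after %gep, preserving the users-before-defs
  // visiting order.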
4840   unsigned idx = 0;
4841   while (idx != Worklist.size()) {
4842     Instruction *I = Worklist[idx++];
4843 
4844     for (auto OV : I->operand_values()) {
      // Out-of-scope operands cannot be uniform instructions.
4846       if (isOutOfScope(OV))
4847         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
4850       auto *OP = dyn_cast<PHINode>(OV);
4851       if (OP && Legal->isFirstOrderRecurrence(OP))
4852         continue;
4853       // If all the users of the operand are uniform, then add the
4854       // operand into the uniform worklist.
4855       auto *OI = cast<Instruction>(OV);
4856       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4857             auto *J = cast<Instruction>(U);
4858             return Worklist.count(J) ||
4859                    (OI == getLoadStorePointerOperand(J) &&
4860                     isUniformDecision(J, VF));
4861           }))
4862         addToWorklistIfAllowed(OI);
4863     }
4864   }
4865 
4866   // Returns true if Ptr is the pointer operand of a memory access instruction
4867   // I, and I is known to not require scalarization.
4868   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4869     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4870   };
4871 
4872   // For an instruction to be added into Worklist above, all its users inside
4873   // the loop should also be in Worklist. However, this condition cannot be
4874   // true for phi nodes that form a cyclic dependence. We must process phi
4875   // nodes separately. An induction variable will remain uniform if all users
4876   // of the induction variable and induction variable update remain uniform.
4877   // The code below handles both pointer and non-pointer induction variables.
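  // E.g. (illustrative IR), for
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add i64 %iv, 1
  // %iv and %iv.next use each other, so neither can be added by the worklist
  // expansion above; instead, both are added here once all of their other
  // in-loop users are known to be uniform.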
4878   for (auto &Induction : Legal->getInductionVars()) {
4879     auto *Ind = Induction.first;
4880     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4881 
4882     // Determine if all users of the induction variable are uniform after
4883     // vectorization.
4884     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4885       auto *I = cast<Instruction>(U);
4886       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4887              isVectorizedMemAccessUse(I, Ind);
4888     });
4889     if (!UniformInd)
4890       continue;
4891 
4892     // Determine if all users of the induction variable update instruction are
4893     // uniform after vectorization.
4894     auto UniformIndUpdate =
4895         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4896           auto *I = cast<Instruction>(U);
4897           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4898                  isVectorizedMemAccessUse(I, IndUpdate);
4899         });
4900     if (!UniformIndUpdate)
4901       continue;
4902 
4903     // The induction variable and its update instruction will remain uniform.
4904     addToWorklistIfAllowed(Ind);
4905     addToWorklistIfAllowed(IndUpdate);
4906   }
4907 
4908   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4909 }
4910 
4911 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4912   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4913 
4914   if (Legal->getRuntimePointerChecking()->Need) {
4915     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4916         "runtime pointer checks needed. Enable vectorization of this "
4917         "loop with '#pragma clang loop vectorize(enable)' when "
4918         "compiling with -Os/-Oz",
4919         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4920     return true;
4921   }
4922 
4923   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4924     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4925         "runtime SCEV checks needed. Enable vectorization of this "
4926         "loop with '#pragma clang loop vectorize(enable)' when "
4927         "compiling with -Os/-Oz",
4928         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4929     return true;
4930   }
4931 
4932   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4933   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4934     reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
4935         "runtime stride == 1 checks needed. Enable vectorization of "
4936         "this loop with '#pragma clang loop vectorize(enable)' when "
4937         "compiling with -Os/-Oz",
4938         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4939     return true;
4940   }
4941 
4942   return false;
4943 }
4944 
4945 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
4946   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip it.
4949     reportVectorizationFailure(
4950         "Not inserting runtime ptr check for divergent target",
4951         "runtime pointer checks needed. Not enabled for divergent target",
4952         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4953     return None;
4954   }
4955 
4956   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4957   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4958   if (TC == 1) {
4959     reportVectorizationFailure("Single iteration (non) loop",
4960         "loop trip count is one, irrelevant for vectorization",
4961         "SingleIterationLoop", ORE, TheLoop);
4962     return None;
4963   }
4964 
4965   switch (ScalarEpilogueStatus) {
4966   case CM_ScalarEpilogueAllowed:
4967     return computeFeasibleMaxVF(TC);
4968   case CM_ScalarEpilogueNotNeededUsePredicate:
4969     LLVM_DEBUG(
4970         dbgs() << "LV: vector predicate hint/switch found.\n"
4971                << "LV: Not allowing scalar epilogue, creating predicated "
4972                << "vector loop.\n");
4973     break;
4974   case CM_ScalarEpilogueNotAllowedLowTripLoop:
4975     // fallthrough as a special case of OptForSize
4976   case CM_ScalarEpilogueNotAllowedOptSize:
4977     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
4978       LLVM_DEBUG(
4979           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4980     else
4981       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
4982                         << "count.\n");
4983 
    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
4986     if (runtimeChecksRequired())
4987       return None;
4988     break;
4989   }
4990 
  // Now try to fold the loop tail by masking.
4992 
4993   // Invalidate interleave groups that require an epilogue if we can't mask
4994   // the interleave-group.
4995   if (!useMaskedInterleavedAccesses(TTI)) {
4996     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
4997            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5000     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5001   }
5002 
5003   unsigned MaxVF = computeFeasibleMaxVF(TC);
5004   if (TC > 0 && TC % MaxVF == 0) {
5005     // Accept MaxVF if we do not have a tail.
5006     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5007     return MaxVF;
5008   }
5009 
5010   // If we don't know the precise trip count, or if the trip count that we
5011   // found modulo the vectorization factor is not zero, try to fold the tail
5012   // by masking.
5013   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
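  // For instance, with TC = 10 and MaxVF = 4 the vector loop would leave a
  // 2-iteration tail; folding the tail by masking instead executes a third
  // vector iteration with the last two lanes masked off.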
5014   if (Legal->prepareToFoldTailByMasking()) {
5015     FoldTailByMasking = true;
5016     return MaxVF;
5017   }
5018 
5019   if (TC == 0) {
5020     reportVectorizationFailure(
5021         "Unable to calculate the loop count due to complex control flow",
5022         "unable to calculate the loop count due to complex control flow",
5023         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5024     return None;
5025   }
5026 
5027   reportVectorizationFailure(
5028       "Cannot optimize for size and vectorize at the same time.",
5029       "cannot optimize for size and vectorize at the same time. "
5030       "Enable vectorization of this loop with '#pragma clang loop "
5031       "vectorize(enable)' when compiling with -Os/-Oz",
5032       "NoTailLoopWithOptForSize", ORE, TheLoop);
5033   return None;
5034 }
5035 
5036 unsigned
5037 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5038   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5039   unsigned SmallestType, WidestType;
5040   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5041   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5042 
  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
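  // For example (hypothetical numbers), a maximum safe dependence distance
  // of 4 elements over i32 accesses yields a MaxSafeRegisterWidth of
  // 4 * 32 = 128 bits.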
5047   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5048 
5049   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5050 
5051   unsigned MaxVectorSize = WidestRegister / WidestType;
5052 
5053   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5054                     << " / " << WidestType << " bits.\n");
5055   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5056                     << WidestRegister << " bits.\n");
5057 
5058   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5059                                  " into one vector!");
5060   if (MaxVectorSize == 0) {
5061     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5062     MaxVectorSize = 1;
5063     return MaxVectorSize;
5064   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5065              isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
5068     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5069                       << ConstTripCount << "\n");
5070     MaxVectorSize = ConstTripCount;
5071     return MaxVectorSize;
5072   }
5073 
5074   unsigned MaxVF = MaxVectorSize;
5075   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5076       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5077     // Collect all viable vectorization factors larger than the default MaxVF
5078     // (i.e. MaxVectorSize).
5079     SmallVector<unsigned, 8> VFs;
5080     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5081     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5082       VFs.push_back(VS);
5083 
5084     // For each VF calculate its register usage.
5085     auto RUs = calculateRegisterUsage(VFs);
5086 
5087     // Select the largest VF which doesn't require more registers than existing
5088     // ones.
5089     for (int i = RUs.size() - 1; i >= 0; --i) {
5090       bool Selected = true;
5091       for (auto& pair : RUs[i].MaxLocalUsers) {
5092         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5093         if (pair.second > TargetNumRegisters)
5094           Selected = false;
5095       }
5096       if (Selected) {
5097         MaxVF = VFs[i];
5098         break;
5099       }
5100     }
5101     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5102       if (MaxVF < MinVF) {
5103         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5104                           << ") with target's minimum: " << MinVF << '\n');
5105         MaxVF = MinVF;
5106       }
5107     }
5108   }
5109   return MaxVF;
5110 }
5111 
5112 VectorizationFactor
5113 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5114   float Cost = expectedCost(1).first;
5115   const float ScalarCost = Cost;
5116   unsigned Width = 1;
5117   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5118 
5119   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5120   if (ForceVectorization && MaxVF > 1) {
5121     // Ignore scalar width, because the user explicitly wants vectorization.
5122     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5123     // evaluation.
5124     Cost = std::numeric_limits<float>::max();
5125   }
5126 
5127   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
5131     VectorizationCostTy C = expectedCost(i);
5132     float VectorCost = C.first / (float)i;
5133     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5134                       << " costs: " << (int)VectorCost << ".\n");
5135     if (!C.second && !ForceVectorization) {
5136       LLVM_DEBUG(
5137           dbgs() << "LV: Not considering vector loop of width " << i
5138                  << " because it will not generate any vector instructions.\n");
5139       continue;
5140     }
5141     if (VectorCost < Cost) {
5142       Cost = VectorCost;
5143       Width = i;
5144     }
5145   }
5146 
5147   if (!EnableCondStoresVectorization && NumPredStores) {
5148     reportVectorizationFailure("There are conditional stores.",
5149         "store that is conditionally executed prevents vectorization",
5150         "ConditionalStore", ORE, TheLoop);
5151     Width = 1;
5152     Cost = ScalarCost;
5153   }
5154 
  LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems not to be beneficial, "
             << "but was forced by a user.\n");
5158   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5159   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5160   return Factor;
5161 }
5162 
5163 std::pair<unsigned, unsigned>
5164 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5165   unsigned MinWidth = -1U;
5166   unsigned MaxWidth = 8;
5167   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5168 
5169   // For each block.
5170   for (BasicBlock *BB : TheLoop->blocks()) {
5171     // For each instruction in the loop.
5172     for (Instruction &I : BB->instructionsWithoutDebug()) {
5173       Type *T = I.getType();
5174 
5175       // Skip ignored values.
5176       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
5177         continue;
5178 
5179       // Only examine Loads, Stores and PHINodes.
5180       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5181         continue;
5182 
5183       // Examine PHI nodes that are reduction variables. Update the type to
5184       // account for the recurrence type.
5185       if (auto *PN = dyn_cast<PHINode>(&I)) {
5186         if (!Legal->isReductionVariable(PN))
5187           continue;
5188         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5189         T = RdxDesc.getRecurrenceType();
5190       }
5191 
5192       // Examine the stored values.
5193       if (auto *ST = dyn_cast<StoreInst>(&I))
5194         T = ST->getValueOperand()->getType();
5195 
5196       // Ignore loaded pointer types and stored pointer types that are not
5197       // vectorizable.
5198       //
5199       // FIXME: The check here attempts to predict whether a load or store will
5200       //        be vectorized. We only know this for certain after a VF has
5201       //        been selected. Here, we assume that if an access can be
5202       //        vectorized, it will be. We should also look at extending this
5203       //        optimization to non-pointer types.
5204       //
5205       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5206           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5207         continue;
5208 
5209       MinWidth = std::min(MinWidth,
5210                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5211       MaxWidth = std::max(MaxWidth,
5212                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5213     }
5214   }
5215 
5216   return {MinWidth, MaxWidth};
5217 }
5218 
5219 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5220                                                            unsigned LoopCost) {
5221   // -- The interleave heuristics --
5222   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5223   // There are many micro-architectural considerations that we can't predict
5224   // at this level. For example, frontend pressure (on decode or fetch) due to
5225   // code size, or the number and capabilities of the execution ports.
5226   //
5227   // We use the following heuristics to select the interleave count:
5228   // 1. If the code has reductions, then we interleave to break the cross
5229   // iteration dependency.
5230   // 2. If the loop is really small, then we interleave to reduce the loop
5231   // overhead.
5232   // 3. We don't interleave if we think that we will spill registers to memory
5233   // due to the increased register pressure.
5234 
5235   if (!isScalarEpilogueAllowed())
5236     return 1;
5237 
  // The maximum safe dependence distance was already used to limit the VF,
  // so do not interleave on top of it.
5239   if (Legal->getMaxSafeDepDistBytes() != -1U)
5240     return 1;
5241 
5242   // Do not interleave loops with a relatively small known or estimated trip
5243   // count.
5244   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5245   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5246     return 1;
5247 
5248   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so clamp each to at least one to avoid
  // dividing by zero; i.e. assume that at least one instruction uses at
  // least one register.
5251   for (auto& pair : R.MaxLocalUsers) {
5252     pair.second = std::max(pair.second, 1U);
5253   }
5254 
5255   // We calculate the interleave count using the following formula.
5256   // Subtract the number of loop invariants from the number of available
5257   // registers. These registers are used by all of the interleaved instances.
5258   // Next, divide the remaining registers by the number of registers that is
5259   // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power-of-two interleave count to simplify
  // any addressing operations and alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // IC is set to 1 above.
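  // As a worked example with hypothetical numbers: given 32 registers in a
  // class, 2 of them holding loop invariants and a maximum local usage of 6,
  // the count would be PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4.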
5266   unsigned IC = UINT_MAX;
5267 
5268   for (auto& pair : R.MaxLocalUsers) {
5269     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5270     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5271                       << " registers of "
5272                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5273     if (VF == 1) {
5274       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5275         TargetNumRegisters = ForceTargetNumScalarRegs;
5276     } else {
5277       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5278         TargetNumRegisters = ForceTargetNumVectorRegs;
5279     }
5280     unsigned MaxLocalUsers = pair.second;
5281     unsigned LoopInvariantRegs = 0;
5282     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5283       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5284 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5286     // Don't count the induction variable as interleaved.
5287     if (EnableIndVarRegisterHeur) {
5288       TmpIC =
5289           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5290                         std::max(1U, (MaxLocalUsers - 1)));
5291     }
5292 
5293     IC = std::min(IC, TmpIC);
5294   }
5295 
5296   // Clamp the interleave ranges to reasonable counts.
5297   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5298 
5299   // Check if the user has overridden the max.
5300   if (VF == 1) {
5301     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5302       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5303   } else {
5304     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5305       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5306   }
5307 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to at most the trip count divided by VF.
5310   if (BestKnownTC) {
5311     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5312   }
5313 
5314   // If we did not calculate the cost for VF (because the user selected the VF)
5315   // then we calculate the cost of VF here.
5316   if (LoopCost == 0)
5317     LoopCost = expectedCost(VF).first;
5318 
5319   assert(LoopCost && "Non-zero loop cost expected");
5320 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
5323   if (IC > MaxInterleaveCount)
5324     IC = MaxInterleaveCount;
5325   else if (IC < 1)
5326     IC = 1;
5327 
5328   // Interleave if we vectorized this loop and there is a reduction that could
5329   // benefit from interleaving.
5330   if (VF > 1 && !Legal->getReductionVars().empty()) {
5331     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5332     return IC;
5333   }
5334 
5335   // Note that if we've already vectorized the loop we will have done the
5336   // runtime check and so interleaving won't require further checks.
5337   bool InterleavingRequiresRuntimePointerCheck =
5338       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5339 
5340   // We want to interleave small loops in order to reduce the loop overhead and
5341   // potentially expose ILP opportunities.
5342   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5343   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1, and we use the cost model
    // to estimate the cost of the loop; we then interleave until the cost
    // of the loop overhead is about 5% of the cost of the loop.
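    // For example, assuming a (hypothetical) SmallLoopCost of 20 and a
    // LoopCost of 4, SmallIC is capped at PowerOf2Floor(20 / 4) = 4, so the
    // per-iteration overhead of 1 is amortized over a body costing 16.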
5347     unsigned SmallIC =
5348         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5349 
5350     // Interleave until store/load ports (estimated by max interleave count) are
5351     // saturated.
5352     unsigned NumStores = Legal->getNumStores();
5353     unsigned NumLoads = Legal->getNumLoads();
5354     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5355     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5356 
5357     // If we have a scalar reduction (vector reductions are already dealt with
5358     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2, so
    // the critical path only gets increased by one reduction operation.
5361     if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5362       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5363       SmallIC = std::min(SmallIC, F);
5364       StoresIC = std::min(StoresIC, F);
5365       LoadsIC = std::min(LoadsIC, F);
5366     }
5367 
5368     if (EnableLoadStoreRuntimeInterleave &&
5369         std::max(StoresIC, LoadsIC) > SmallIC) {
5370       LLVM_DEBUG(
5371           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5372       return std::max(StoresIC, LoadsIC);
5373     }
5374 
5375     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5376     return SmallIC;
5377   }
5378 
5379   // Interleave if this is a large loop (small loops are already dealt with by
5380   // this point) that could benefit from interleaving.
5381   bool HasReductions = !Legal->getReductionVars().empty();
5382   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5383     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5384     return IC;
5385   }
5386 
5387   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5388   return 1;
5389 }
5390 
5391 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5392 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
5397   // met before their users. We assume that each instruction that has in-loop
5398   // users starts an interval. We record every time that an in-loop value is
5399   // used, so we have a list of the first and last occurrences of each
5400   // instruction. Next, we transpose this data structure into a multi map that
5401   // holds the list of intervals that *end* at a specific location. This multi
5402   // map allows us to perform a linear search. We scan the instructions linearly
5403   // and record each time that a new interval starts, by placing it in a set.
5404   // If we find this value in the multi-map then we remove it from the set.
5405   // The max register usage is the maximum size of the set.
5406   // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not consume
  // more registers.
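  // As a small illustrative example, in the straight-line sequence
  //   %a = ...
  //   %b = add %a, 1     ; last use of %a, its interval ends here
  //   %c = add %b, %b    ; last use of %b, its interval ends here
  // at most two intervals are open at once, so the estimated usage is two
  // registers (ignoring loop invariants).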
5410   LoopBlocksDFS DFS(TheLoop);
5411   DFS.perform(LI);
5412 
5413   RegisterUsage RU;
5414 
5415   // Each 'key' in the map opens a new interval. The values
5416   // of the map are the index of the 'last seen' usage of the
5417   // instruction that is the key.
5418   using IntervalMap = DenseMap<Instruction *, unsigned>;
5419 
5420   // Maps instruction to its index.
5421   SmallVector<Instruction *, 64> IdxToInstr;
5422   // Marks the end of each interval.
5423   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
5425   SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are defined
  // outside the loop. Only out-of-loop instructions end up here; arguments
  // and constants are ignored during the scan below.
5428   SmallPtrSet<Value *, 8> LoopInvariants;
5429 
5430   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5431     for (Instruction &I : BB->instructionsWithoutDebug()) {
5432       IdxToInstr.push_back(&I);
5433 
5434       // Save the end location of each USE.
5435       for (Value *U : I.operands()) {
5436         auto *Instr = dyn_cast<Instruction>(U);
5437 
5438         // Ignore non-instruction values such as arguments, constants, etc.
5439         if (!Instr)
5440           continue;
5441 
5442         // If this instruction is outside the loop then record it and continue.
5443         if (!TheLoop->contains(Instr)) {
5444           LoopInvariants.insert(Instr);
5445           continue;
5446         }
5447 
5448         // Overwrite previous end points.
5449         EndPoint[Instr] = IdxToInstr.size();
5450         Ends.insert(Instr);
5451       }
5452     }
5453   }
5454 
5455   // Saves the list of intervals that end with the index in 'key'.
5456   using InstrList = SmallVector<Instruction *, 2>;
5457   DenseMap<unsigned, InstrList> TransposeEnds;
5458 
5459   // Transpose the EndPoints to a list of values that end at each index.
5460   for (auto &Interval : EndPoint)
5461     TransposeEnds[Interval.second].push_back(Interval.first);
5462 
5463   SmallPtrSet<Instruction *, 8> OpenIntervals;
5464 
5465   // Get the size of the widest register.
5466   unsigned MaxSafeDepDist = -1U;
5467   if (Legal->getMaxSafeDepDistBytes() != -1U)
5468     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5469   unsigned WidestRegister =
5470       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5471   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5472 
5473   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5474   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5475 
5476   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5477 
5478   // A lambda that gets the register usage for the given type and VF.
5479   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5480     if (Ty->isTokenTy())
5481       return 0U;
5482     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5483     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5484   };
5485 
5486   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5487     Instruction *I = IdxToInstr[i];
5488 
5489     // Remove all of the instructions that end at this location.
5490     InstrList &List = TransposeEnds[i];
5491     for (Instruction *ToRemove : List)
5492       OpenIntervals.erase(ToRemove);
5493 
5494     // Ignore instructions that are never used within the loop.
5495     if (Ends.find(I) == Ends.end())
5496       continue;
5497 
5498     // Skip ignored values.
5499     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5500       continue;
5501 
5502     // For each VF find the maximum usage of registers.
5503     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5504       // Count the number of live intervals.
5505       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5506 
      if (VFs[j] == 1) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // SmallMapVector value-initializes missing entries to zero.
          RegUsage[ClassID] += 1;
        }
5515       } else {
5516         collectUniformsAndScalars(VFs[j]);
5517         for (auto Inst : OpenIntervals) {
5518           // Skip ignored values for VF > 1.
5519           if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end())
5520             continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            // Map entries are value-initialized to zero on first access.
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
5534         }
5535       }
5536 
      for (auto &pair : RegUsage)
        // Missing MaxUsages entries are value-initialized to zero, so a
        // plain std::max covers both the new and existing-entry cases.
        MaxUsages[j][pair.first] =
            std::max(MaxUsages[j][pair.first], pair.second);
5543     }
5544 
5545     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5546                       << OpenIntervals.size() << '\n');
5547 
5548     // Add the current instruction to the list of open intervals.
5549     OpenIntervals.insert(I);
5550   }
5551 
5552   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5553     SmallMapVector<unsigned, unsigned, 4> Invariant;
5554 
5555     for (auto Inst : LoopInvariants) {
      unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
      Invariant[ClassID] += Usage;
5562     }
5563 
5564     LLVM_DEBUG({
5565       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5566       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5567              << " item\n";
5568       for (const auto &pair : MaxUsages[i]) {
5569         dbgs() << "LV(REG): RegisterClass: "
5570                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5571                << " registers\n";
5572       }
5573       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5574              << " item\n";
5575       for (const auto &pair : Invariant) {
5576         dbgs() << "LV(REG): RegisterClass: "
5577                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5578                << " registers\n";
5579       }
5580     });
5581 
5582     RU.LoopInvariantRegs = Invariant;
5583     RU.MaxLocalUsers = MaxUsages[i];
5584     RUs[i] = RU;
5585   }
5586 
5587   return RUs;
5588 }
5589 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5591   // TODO: Cost model for emulated masked load/store is completely
5592   // broken. This hack guides the cost model to use an artificially
5593   // high enough value to practically disable vectorization with such
5594   // operations, except where previously deployed legality hack allowed
5595   // using very low cost values. This is to avoid regressions coming simply
5596   // from moving "masked load/store" check from legality to cost model.
5597   // Masked Load/Gather emulation was previously never allowed.
5598   // Limited number of Masked Store/Scatter emulation was allowed.
5599   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5600   return isa<LoadInst>(I) ||
5601          (isa<StoreInst>(I) &&
5602           NumPredStores > NumberOfStoresToPredicate);
5603 }
5604 
5605 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5606   // If we aren't vectorizing the loop, or if we've already collected the
5607   // instructions to scalarize, there's nothing to do. Collection may already
5608   // have occurred if we have a user-selected VF and are now computing the
5609   // expected cost for interleaving.
5610   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5611     return;
5612 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5614   // not profitable to scalarize any instructions, the presence of VF in the
5615   // map will indicate that we've analyzed it already.
5616   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5617 
5618   // Find all the instructions that are scalar with predication in the loop and
5619   // determine if it would be better to not if-convert the blocks they are in.
5620   // If so, we also record the instructions to scalarize.
5621   for (BasicBlock *BB : TheLoop->blocks()) {
5622     if (!blockNeedsPredication(BB))
5623       continue;
5624     for (Instruction &I : *BB)
5625       if (isScalarWithPredication(&I)) {
5626         ScalarCostsTy ScalarCosts;
        // Do not apply the discount logic if the hacked cost is needed
        // for emulated masked memrefs.
5629         if (!useEmulatedMaskMemRefHack(&I) &&
5630             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5631           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5632         // Remember that BB will remain after vectorization.
5633         PredicatedBBsAfterVectorization.insert(BB);
5634       }
5635   }
5636 }
5637 
5638 int LoopVectorizationCostModel::computePredInstDiscount(
5639     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5640     unsigned VF) {
5641   assert(!isUniformAfterVectorization(PredInst, VF) &&
5642          "Instruction marked uniform-after-vectorization will be predicated");
5643 
5644   // Initialize the discount to zero, meaning that the scalar version and the
5645   // vector version cost the same.
5646   int Discount = 0;
5647 
5648   // Holds instructions to analyze. The instructions we visit are mapped in
5649   // ScalarCosts. Those instructions are the ones that would be scalarized if
5650   // we find that the scalar version costs less.
5651   SmallVector<Instruction *, 8> Worklist;
5652 
5653   // Returns true if the given instruction can be scalarized.
5654   auto canBeScalarized = [&](Instruction *I) -> bool {
5655     // We only attempt to scalarize instructions forming a single-use chain
5656     // from the original predicated block that would otherwise be vectorized.
5657     // Although not strictly necessary, we give up on instructions we know will
5658     // already be scalar to avoid traversing chains that are unlikely to be
5659     // beneficial.
5660     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5661         isScalarAfterVectorization(I, VF))
5662       return false;
5663 
5664     // If the instruction is scalar with predication, it will be analyzed
5665     // separately. We ignore it within the context of PredInst.
5666     if (isScalarWithPredication(I))
5667       return false;
5668 
5669     // If any of the instruction's operands are uniform after vectorization,
5670     // the instruction cannot be scalarized. This prevents, for example, a
5671     // masked load from being scalarized.
5672     //
5673     // We assume we will only emit a value for lane zero of an instruction
5674     // marked uniform after vectorization, rather than VF identical values.
5675     // Thus, if we scalarize an instruction that uses a uniform, we would
5676     // create uses of values corresponding to the lanes we aren't emitting code
5677     // for. This behavior can be changed by allowing getScalarValue to clone
5678     // the lane zero values for uniforms rather than asserting.
5679     for (Use &U : I->operands())
5680       if (auto *J = dyn_cast<Instruction>(U.get()))
5681         if (isUniformAfterVectorization(J, VF))
5682           return false;
5683 
5684     // Otherwise, we can scalarize the instruction.
5685     return true;
5686   };
5687 
5688   // Compute the expected cost discount from scalarizing the entire expression
5689   // feeding the predicated instruction. We currently only consider expressions
5690   // that are single-use instruction chains.
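  // For instance (hypothetical IR), for a predicated store chain
  //   %mul = mul i32 %x, 3
  //   store i32 %mul, i32* %p    ; scalar with predication
  // the discount weighs widening %mul (plus extracting its lanes for the
  // scalarized store) against scalarizing %mul together with the store.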
5691   Worklist.push_back(PredInst);
5692   while (!Worklist.empty()) {
5693     Instruction *I = Worklist.pop_back_val();
5694 
5695     // If we've already analyzed the instruction, there's nothing to do.
5696     if (ScalarCosts.find(I) != ScalarCosts.end())
5697       continue;
5698 
5699     // Compute the cost of the vector instruction. Note that this cost already
5700     // includes the scalarization overhead of the predicated instruction.
5701     unsigned VectorCost = getInstructionCost(I, VF).first;
5702 
5703     // Compute the cost of the scalarized instruction. This cost is the cost of
5704     // the instruction as if it wasn't if-converted and instead remained in the
5705     // predicated block. We will scale this cost by block probability after
5706     // computing the scalarization overhead.
5707     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5708 
5709     // Compute the scalarization overhead of needed insertelement instructions
5710     // and phi nodes.
5711     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5712       ScalarCost += TTI.getScalarizationOverhead(
5713           cast<VectorType>(ToVectorTy(I->getType(), VF)),
5714           APInt::getAllOnesValue(VF), true, false);
5715       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5716     }
5717 
5718     // Compute the scalarization overhead of needed extractelement
5719     // instructions. For each of the instruction's operands, if the operand can
5720     // be scalarized, add it to the worklist; otherwise, account for the
5721     // overhead.
5722     for (Use &U : I->operands())
5723       if (auto *J = dyn_cast<Instruction>(U.get())) {
5724         assert(VectorType::isValidElementType(J->getType()) &&
5725                "Instruction has non-scalar type");
5726         if (canBeScalarized(J))
5727           Worklist.push_back(J);
5728         else if (needsExtract(J, VF))
5729           ScalarCost += TTI.getScalarizationOverhead(
5730               cast<VectorType>(ToVectorTy(J->getType(), VF)),
5731               APInt::getAllOnesValue(VF), false, true);
5732       }
5733 
5734     // Scale the total scalar cost by block probability.
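    // (getReciprocalPredBlockProb() returns the reciprocal of the block's
    // execution probability, currently 2, i.e. a predicated block is assumed
    // to execute half the time, so the scalar cost is halved.)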
5735     ScalarCost /= getReciprocalPredBlockProb();
5736 
5737     // Compute the discount. A non-negative discount means the vector version
5738     // of the instruction costs more, and scalarizing would be beneficial.
5739     Discount += VectorCost - ScalarCost;
5740     ScalarCosts[I] = ScalarCost;
5741   }
5742 
5743   return Discount;
5744 }
5745 
5746 LoopVectorizationCostModel::VectorizationCostTy
5747 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5748   VectorizationCostTy Cost;
5749 
5750   // For each block.
5751   for (BasicBlock *BB : TheLoop->blocks()) {
5752     VectorizationCostTy BlockCost;
5753 
5754     // For each instruction in the old loop.
5755     for (Instruction &I : BB->instructionsWithoutDebug()) {
5756       // Skip ignored values.
5757       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5758           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5759         continue;
5760 
5761       VectorizationCostTy C = getInstructionCost(&I, VF);
5762 
5763       // Check if we should override the cost.
5764       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5765         C.first = ForceTargetInstructionCost;
5766 
5767       BlockCost.first += C.first;
5768       BlockCost.second |= C.second;
5769       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5770                         << " for VF " << VF << " For instruction: " << I
5771                         << '\n');
5772     }
5773 
5774     // If we are vectorizing a predicated block, it will have been
5775     // if-converted. This means that the block's instructions (aside from
5776     // stores and instructions that may divide by zero) will now be
5777     // unconditionally executed. For the scalar case, we may not always execute
5778     // the predicated block. Thus, scale the block's cost by the probability of
5779     // executing it.
5780     if (VF == 1 && blockNeedsPredication(BB))
5781       BlockCost.first /= getReciprocalPredBlockProb();
5782 
5783     Cost.first += BlockCost.first;
5784     Cost.second |= BlockCost.second;
5785   }
5786 
5787   return Cost;
5788 }
5789 
/// Gets the address access SCEV after verifying that the access pattern
/// is loop invariant except for the induction variable dependence.
5792 ///
5793 /// This SCEV can be sent to the Target in order to estimate the address
5794 /// calculation cost.
5795 static const SCEV *getAddressAccessSCEV(
5796               Value *Ptr,
5797               LoopVectorizationLegality *Legal,
5798               PredicatedScalarEvolution &PSE,
5799               const Loop *TheLoop) {
5800 
5801   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5802   if (!Gep)
5803     return nullptr;
5804 
5805   // We are looking for a gep with all loop invariant indices except for one
5806   // which should be an induction variable.
5807   auto SE = PSE.getSE();
5808   unsigned NumOperands = Gep->getNumOperands();
5809   for (unsigned i = 1; i < NumOperands; ++i) {
5810     Value *Opd = Gep->getOperand(i);
5811     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5812         !Legal->isInductionVariable(Opd))
5813       return nullptr;
5814   }
5815 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5817   return PSE.getSCEV(Ptr);
5818 }
5819 
5820 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5821   return Legal->hasStride(I->getOperand(0)) ||
5822          Legal->hasStride(I->getOperand(1));
5823 }
5824 
5825 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5826                                                                  unsigned VF) {
5827   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5828   Type *ValTy = getMemInstValueType(I);
5829   auto SE = PSE.getSE();
5830 
5831   unsigned AS = getLoadStoreAddressSpace(I);
5832   Value *Ptr = getLoadStorePointerOperand(I);
5833   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5834 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
5837   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5838 
5839   // Get the cost of the scalar memory instruction and address computation.
5840   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5841 
5842   // Don't pass *I here, since it is scalar but will actually be part of a
5843   // vectorized loop where the user of it is a vectorized instruction.
5844   const Align Alignment = getLoadStoreAlignment(I);
5845   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5846                                    Alignment, AS,
5847                                    TTI::TCK_RecipThroughput);
5848 
5849   // Get the overhead of the extractelement and insertelement instructions
5850   // we might create due to scalarization.
5851   Cost += getScalarizationOverhead(I, VF);
5852 
5853   // If we have a predicated store, it may not be executed for each vector
5854   // lane. Scale the cost by the probability of executing the predicated
5855   // block.
5856   if (isPredicatedInst(I)) {
5857     Cost /= getReciprocalPredBlockProb();
5858 
5859     if (useEmulatedMaskMemRefHack(I))
5860       // Artificially setting to a high enough value to practically disable
5861       // vectorization with such operations.
5862       Cost = 3000000;
5863   }
5864 
5865   return Cost;
5866 }
5867 
5868 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5869                                                              unsigned VF) {
5870   Type *ValTy = getMemInstValueType(I);
5871   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5872   Value *Ptr = getLoadStorePointerOperand(I);
5873   unsigned AS = getLoadStoreAddressSpace(I);
5874   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5875   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5876 
5877   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5878          "Stride should be 1 or -1 for consecutive memory access");
5879   const Align Alignment = getLoadStoreAlignment(I);
5880   unsigned Cost = 0;
5881   if (Legal->isMaskRequired(I))
5882     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy,
5883                                       Alignment.value(), AS, CostKind);
5884   else
5885     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5886                                 CostKind, I);
5887 
5888   bool Reverse = ConsecutiveStride < 0;
5889   if (Reverse)
5890     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5891   return Cost;
5892 }
5893 
5894 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5895                                                          unsigned VF) {
5896   Type *ValTy = getMemInstValueType(I);
5897   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5898   const Align Alignment = getLoadStoreAlignment(I);
5899   unsigned AS = getLoadStoreAddressSpace(I);
5900   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5901   if (isa<LoadInst>(I)) {
5902     return TTI.getAddressComputationCost(ValTy) +
5903            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5904                                CostKind) +
5905            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5906   }
5907   StoreInst *SI = cast<StoreInst>(I);
5908 
5909   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5910   return TTI.getAddressComputationCost(ValTy) +
5911          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
5912                              CostKind) +
5913          (isLoopInvariantStoreValue
5914               ? 0
5915               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5916                                        VF - 1));
5917 }
5918 
5919 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5920                                                           unsigned VF) {
5921   Type *ValTy = getMemInstValueType(I);
5922   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5923   const Align Alignment = getLoadStoreAlignment(I);
5924   Value *Ptr = getLoadStorePointerOperand(I);
5925 
5926   return TTI.getAddressComputationCost(VectorTy) +
5927          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5928                                     Legal->isMaskRequired(I), Alignment.value(),
5929                                     TargetTransformInfo::TCK_RecipThroughput,
5930                                     I);
5931 }
5932 
5933 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5934                                                             unsigned VF) {
5935   Type *ValTy = getMemInstValueType(I);
5936   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5937   unsigned AS = getLoadStoreAddressSpace(I);
5938 
5939   auto Group = getInterleavedAccessGroup(I);
5940   assert(Group && "Fail to get an interleaved access group.");
5941 
5942   unsigned InterleaveFactor = Group->getFactor();
5943   VectorType *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5944 
5945   // Holds the indices of existing members in an interleaved load group.
5946   // An interleaved store group doesn't need this as it doesn't allow gaps.
5947   SmallVector<unsigned, 4> Indices;
5948   if (isa<LoadInst>(I)) {
5949     for (unsigned i = 0; i < InterleaveFactor; i++)
5950       if (Group->getMember(i))
5951         Indices.push_back(i);
5952   }
5953 
5954   // Calculate the cost of the whole interleaved group.
5955   bool UseMaskForGaps =
5956       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5957   unsigned Cost = TTI.getInterleavedMemoryOpCost(
5958       I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5959       Group->getAlign().value(), AS, TTI::TCK_RecipThroughput,
5960       Legal->isMaskRequired(I), UseMaskForGaps);
5961 
5962   if (Group->isReverse()) {
5963     // TODO: Add support for reversed masked interleaved access.
5964     assert(!Legal->isMaskRequired(I) &&
5965            "Reverse masked interleaved access not supported.");
5966     Cost += Group->getNumMembers() *
5967             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5968   }
5969   return Cost;
5970 }
5971 
5972 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5973                                                               unsigned VF) {
5974   // Calculate scalar cost only. Vectorization cost should be ready at this
5975   // moment.
5976   if (VF == 1) {
5977     Type *ValTy = getMemInstValueType(I);
5978     const Align Alignment = getLoadStoreAlignment(I);
5979     unsigned AS = getLoadStoreAddressSpace(I);
5980 
5981     return TTI.getAddressComputationCost(ValTy) +
5982            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
5983                                TTI::TCK_RecipThroughput, I);
5984   }
5985   return getWideningCost(I, VF);
5986 }
5987 
5988 LoopVectorizationCostModel::VectorizationCostTy
5989 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5990   // If we know that this instruction will remain uniform, check the cost of
5991   // the scalar version.
5992   if (isUniformAfterVectorization(I, VF))
5993     VF = 1;
5994 
5995   if (VF > 1 && isProfitableToScalarize(I, VF))
5996     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5997 
5998   // Forced scalars do not have any scalarization overhead.
5999   auto ForcedScalar = ForcedScalars.find(VF);
6000   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
6001     auto InstSet = ForcedScalar->second;
6002     if (InstSet.find(I) != InstSet.end())
6003       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
6004   }
6005 
6006   Type *VectorTy;
6007   unsigned C = getInstructionCost(I, VF, VectorTy);
6008 
6009   bool TypeNotScalarized =
6010       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
6011   return VectorizationCostTy(C, TypeNotScalarized);
6012 }
6013 
6014 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6015                                                               unsigned VF) {
6016 
6017   if (VF == 1)
6018     return 0;
6019 
6020   unsigned Cost = 0;
6021   Type *RetTy = ToVectorTy(I->getType(), VF);
6022   if (!RetTy->isVoidTy() &&
6023       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6024     Cost += TTI.getScalarizationOverhead(
6025         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false);
6026 
6027   // Some targets keep addresses scalar.
6028   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6029     return Cost;
6030 
6031   // Some targets support efficient element stores.
6032   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6033     return Cost;
6034 
6035   // Collect operands to consider.
6036   CallInst *CI = dyn_cast<CallInst>(I);
6037   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6038 
6039   // Skip operands that do not require extraction/scalarization and do not incur
6040   // any overhead.
6041   return Cost + TTI.getOperandsScalarizationOverhead(
6042                     filterExtractingOperands(Ops, VF), VF);
6043 }
6044 
6045 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6046   if (VF == 1)
6047     return;
6048   NumPredStores = 0;
6049   for (BasicBlock *BB : TheLoop->blocks()) {
6050     // For each instruction in the old loop.
6051     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6053       if (!Ptr)
6054         continue;
6055 
6056       // TODO: We should generate better code and update the cost model for
6057       // predicated uniform stores. Today they are treated as any other
6058       // predicated store (see added test cases in
6059       // invariant-store-vectorization.ll).
6060       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6061         NumPredStores++;
6062 
6063       if (Legal->isUniform(Ptr) &&
6064           // Conditional loads and stores should be scalarized and predicated.
6065           // isScalarWithPredication cannot be used here since masked
6066           // gather/scatters are not considered scalar with predication.
6067           !Legal->blockNeedsPredication(I.getParent())) {
6068         // TODO: Avoid replicating loads and stores instead of
6069         // relying on instcombine to remove them.
6070         // Load: Scalar load + broadcast
6071         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6072         unsigned Cost = getUniformMemOpCost(&I, VF);
6073         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6074         continue;
6075       }
6076 
6077       // We assume that widening is the best solution when possible.
6078       if (memoryInstructionCanBeWidened(&I, VF)) {
6079         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6080         int ConsecutiveStride =
6081                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6082         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6083                "Expected consecutive stride.");
6084         InstWidening Decision =
6085             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6086         setWideningDecision(&I, VF, Decision, Cost);
6087         continue;
6088       }
6089 
6090       // Choose between Interleaving, Gather/Scatter or Scalarization.
6091       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6092       unsigned NumAccesses = 1;
6093       if (isAccessInterleaved(&I)) {
6094         auto Group = getInterleavedAccessGroup(&I);
6095         assert(Group && "Failed to get an interleaved access group.");
6096 
6097         // Make one decision for the whole group.
6098         if (getWideningDecision(&I, VF) != CM_Unknown)
6099           continue;
6100 
6101         NumAccesses = Group->getNumMembers();
6102         if (interleavedAccessCanBeWidened(&I, VF))
6103           InterleaveCost = getInterleaveGroupCost(&I, VF);
6104       }
6105 
6106       unsigned GatherScatterCost =
6107           isLegalGatherOrScatter(&I)
6108               ? getGatherScatterCost(&I, VF) * NumAccesses
6109               : std::numeric_limits<unsigned>::max();
6110 
6111       unsigned ScalarizationCost =
6112           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6113 
6114       // Choose the best option for the current VF, record the decision, and
6115       // use it during vectorization.
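      // Note the strictness of the comparisons below: on a cost tie,
      // interleaving is preferred to gather/scatter, and scalarization is
      // preferred to gather/scatter.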
6116       unsigned Cost;
6117       InstWidening Decision;
6118       if (InterleaveCost <= GatherScatterCost &&
6119           InterleaveCost < ScalarizationCost) {
6120         Decision = CM_Interleave;
6121         Cost = InterleaveCost;
6122       } else if (GatherScatterCost < ScalarizationCost) {
6123         Decision = CM_GatherScatter;
6124         Cost = GatherScatterCost;
6125       } else {
6126         Decision = CM_Scalarize;
6127         Cost = ScalarizationCost;
6128       }
6129       // If the instruction belongs to an interleave group, the whole group
6130       // receives the same decision. The group as a whole receives the cost,
6131       // but the cost will actually be attached to a single member instruction.
6132       if (auto Group = getInterleavedAccessGroup(&I))
6133         setWideningDecision(Group, VF, Decision, Cost);
6134       else
6135         setWideningDecision(&I, VF, Decision, Cost);
6136     }
6137   }
6138 
6139   // Make sure that any load of address and any other address computation
6140   // remains scalar unless there is gather/scatter support. This avoids
6141   // inevitable extracts into address registers, and also has the benefit of
6142   // activating LSR more, since that pass can't optimize vectorized
6143   // addresses.
6144   if (TTI.prefersVectorizedAddressing())
6145     return;
6146 
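  // Otherwise, seed a set with the in-loop pointer operands of all accesses
  // that will not become gathers/scatters, transitively add the same-block
  // instructions computing them, and force everything collected to remain
  // scalar. E.g. a GEP feeding a scalarized load stays scalar, as does the
  // index arithmetic defined alongside it in the same block.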
6147   // Start with all scalar pointer uses.
6148   SmallPtrSet<Instruction *, 8> AddrDefs;
6149   for (BasicBlock *BB : TheLoop->blocks())
6150     for (Instruction &I : *BB) {
6151       Instruction *PtrDef =
6152         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6153       if (PtrDef && TheLoop->contains(PtrDef) &&
6154           getWideningDecision(&I, VF) != CM_GatherScatter)
6155         AddrDefs.insert(PtrDef);
6156     }
6157 
6158   // Add all instructions used to generate the addresses.
6159   SmallVector<Instruction *, 4> Worklist;
6160   for (auto *I : AddrDefs)
6161     Worklist.push_back(I);
6162   while (!Worklist.empty()) {
6163     Instruction *I = Worklist.pop_back_val();
6164     for (auto &Op : I->operands())
6165       if (auto *InstOp = dyn_cast<Instruction>(Op))
6166         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6167             AddrDefs.insert(InstOp).second)
6168           Worklist.push_back(InstOp);
6169   }
6170 
6171   for (auto *I : AddrDefs) {
6172     if (isa<LoadInst>(I)) {
6173       // Setting the desired widening decision should ideally be handled by
6174       // the cost functions, but since that requires knowing whether the
6175       // loaded value is involved in an address computation, the decision is
6176       // instead overridden here, where that information is available.
6177       InstWidening Decision = getWideningDecision(I, VF);
6178       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6179         // Scalarize a widened load of address.
6180         setWideningDecision(I, VF, CM_Scalarize,
6181                             (VF * getMemoryInstructionCost(I, 1)));
6182       else if (auto Group = getInterleavedAccessGroup(I)) {
6183         // Scalarize an interleave group of address loads.
6184         for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
6185           if (Instruction *Member = Group->getMember(Idx))
6186             setWideningDecision(Member, VF, CM_Scalarize,
6187                                 (VF * getMemoryInstructionCost(Member, 1)));
6188         }
6189       }
6190     } else
6191       // Make sure I gets scalarized and a cost estimate without
6192       // scalarization overhead.
6193       ForcedScalars[VF].insert(I);
6194   }
6195 }
6196 
6197 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6198                                                         unsigned VF,
6199                                                         Type *&VectorTy) {
6200   Type *RetTy = I->getType();
6201   if (canTruncateToMinimalBitwidth(I, VF))
6202     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6203   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6204   auto SE = PSE.getSE();
6205   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6206 
6207   // TODO: We need to estimate the cost of intrinsic calls.
6208   switch (I->getOpcode()) {
6209   case Instruction::GetElementPtr:
6210     // We mark this instruction as zero-cost because the cost of GEPs in
6211     // vectorized code depends on whether the corresponding memory instruction
6212     // is scalarized or not. Therefore, we handle GEPs with the memory
6213     // instruction cost.
6214     return 0;
6215   case Instruction::Br: {
6216     // In the case of scalarized and predicated instructions, there will be VF
6217     // predicated blocks in the vectorized loop. Each branch around these
6218     // blocks also requires an extract of its vector compare i1 element.
6219     bool ScalarPredicatedBB = false;
6220     BranchInst *BI = cast<BranchInst>(I);
6221     if (VF > 1 && BI->isConditional() &&
6222         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
6223              PredicatedBBsAfterVectorization.end() ||
6224          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
6225              PredicatedBBsAfterVectorization.end()))
6226       ScalarPredicatedBB = true;
6227 
6228     if (ScalarPredicatedBB) {
6229       // Return cost for branches around scalarized and predicated blocks.
6230       VectorType *Vec_i1Ty =
6231           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6232       return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF),
6233                                            false, true) +
6234               (TTI.getCFInstrCost(Instruction::Br) * VF));
6235     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6236       // The back-edge branch will remain, as will all scalar branches.
6237       return TTI.getCFInstrCost(Instruction::Br);
6238     else
6239       // This branch will be eliminated by if-conversion.
6240       return 0;
6241     // Note: We currently assume zero cost for an unconditional branch inside
6242     // a predicated block since it will become a fall-through, although we
6243     // may decide in the future to call TTI for all branches.
6244   }
6245   case Instruction::PHI: {
6246     auto *Phi = cast<PHINode>(I);
6247 
6248     // First-order recurrences are replaced by vector shuffles inside the loop.
6249     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6250     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6251       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6252                                 cast<VectorType>(VectorTy), VF - 1,
6253                                 VectorType::get(RetTy, 1));
6254 
6255     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6256     // converted into select instructions. We require N - 1 selects per phi
6257     // node, where N is the number of incoming values.
6258     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6259       return (Phi->getNumIncomingValues() - 1) *
6260              TTI.getCmpSelInstrCost(
6261                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6262                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6263                  CostKind);
6264 
6265     return TTI.getCFInstrCost(Instruction::PHI);
6266   }
6267   case Instruction::UDiv:
6268   case Instruction::SDiv:
6269   case Instruction::URem:
6270   case Instruction::SRem:
6271     // If we have a predicated instruction, it may not be executed for each
6272     // vector lane. Get the scalarization cost and scale this amount by the
6273     // probability of executing the predicated block. If the instruction is not
6274     // predicated, we fall through to the next case.
6275     if (VF > 1 && isScalarWithPredication(I)) {
6276       unsigned Cost = 0;
6277 
6278       // These instructions have a non-void type, so account for the phi nodes
6279       // that we will create. This cost is likely to be zero. The phi node
6280       // cost, if any, should be scaled by the block probability because it
6281       // models a copy at the end of each predicated block.
6282       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6283 
6284       // The cost of the non-predicated instruction.
6285       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
6286 
6287       // The cost of insertelement and extractelement instructions needed for
6288       // scalarization.
6289       Cost += getScalarizationOverhead(I, VF);
6290 
6291       // Scale the cost by the probability of executing the predicated blocks.
6292       // This assumes the predicated block for each vector lane is equally
6293       // likely.
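      // getReciprocalPredBlockProb() currently returns 2, i.e. a predicated
      // block is assumed to execute once every two iterations, halving the
      // cost computed above.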
6294       return Cost / getReciprocalPredBlockProb();
6295     }
6296     LLVM_FALLTHROUGH;
6297   case Instruction::Add:
6298   case Instruction::FAdd:
6299   case Instruction::Sub:
6300   case Instruction::FSub:
6301   case Instruction::Mul:
6302   case Instruction::FMul:
6303   case Instruction::FDiv:
6304   case Instruction::FRem:
6305   case Instruction::Shl:
6306   case Instruction::LShr:
6307   case Instruction::AShr:
6308   case Instruction::And:
6309   case Instruction::Or:
6310   case Instruction::Xor: {
6311     // Since we will replace the stride by 1, the multiplication should go away.
6312     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6313       return 0;
6314     // Certain instructions can be cheaper to vectorize if they have a constant
6315     // second vector operand. One example of this are shifts on x86.
6316     Value *Op2 = I->getOperand(1);
6317     TargetTransformInfo::OperandValueProperties Op2VP;
6318     TargetTransformInfo::OperandValueKind Op2VK =
6319         TTI.getOperandInfo(Op2, Op2VP);
6320     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6321       Op2VK = TargetTransformInfo::OK_UniformValue;
6322 
6323     SmallVector<const Value *, 4> Operands(I->operand_values());
6324     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6325     return N * TTI.getArithmeticInstrCost(
6326                    I->getOpcode(), VectorTy, CostKind,
6327                    TargetTransformInfo::OK_AnyValue,
6328                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6329   }
6330   case Instruction::FNeg: {
6331     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6332     return N * TTI.getArithmeticInstrCost(
6333                    I->getOpcode(), VectorTy, CostKind,
6334                    TargetTransformInfo::OK_AnyValue,
6335                    TargetTransformInfo::OK_AnyValue,
6336                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6337                    I->getOperand(0), I);
6338   }
6339   case Instruction::Select: {
6340     SelectInst *SI = cast<SelectInst>(I);
6341     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6342     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6343     Type *CondTy = SI->getCondition()->getType();
6344     if (!ScalarCond)
6345       CondTy = VectorType::get(CondTy, VF);
6346 
6347     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
6348                                   CostKind, I);
6349   }
6350   case Instruction::ICmp:
6351   case Instruction::FCmp: {
6352     Type *ValTy = I->getOperand(0)->getType();
6353     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6354     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6355       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6356     VectorTy = ToVectorTy(ValTy, VF);
6357     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind,
6358                                   I);
6359   }
6360   case Instruction::Store:
6361   case Instruction::Load: {
6362     unsigned Width = VF;
6363     if (Width > 1) {
6364       InstWidening Decision = getWideningDecision(I, Width);
6365       assert(Decision != CM_Unknown &&
6366              "CM decision should be taken at this point");
6367       if (Decision == CM_Scalarize)
6368         Width = 1;
6369     }
6370     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6371     return getMemoryInstructionCost(I, VF);
6372   }
6373   case Instruction::ZExt:
6374   case Instruction::SExt:
6375   case Instruction::FPToUI:
6376   case Instruction::FPToSI:
6377   case Instruction::FPExt:
6378   case Instruction::PtrToInt:
6379   case Instruction::IntToPtr:
6380   case Instruction::SIToFP:
6381   case Instruction::UIToFP:
6382   case Instruction::Trunc:
6383   case Instruction::FPTrunc:
6384   case Instruction::BitCast: {
6385     // We optimize the truncation of induction variables having constant
6386     // integer steps. The cost of these truncations is the same as the scalar
6387     // operation.
6388     if (isOptimizableIVTruncate(I, VF)) {
6389       auto *Trunc = cast<TruncInst>(I);
6390       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6391                                   Trunc->getSrcTy(), CostKind, Trunc);
6392     }
6393 
6394     Type *SrcScalarTy = I->getOperand(0)->getType();
6395     Type *SrcVecTy =
6396         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6397     if (canTruncateToMinimalBitwidth(I, VF)) {
6398       // This cast is going to be shrunk. This may remove the cast or it might
6399       // turn it into a slightly different cast. For example, if MinBW == 16,
6400       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6401       //
6402       // Calculate the modified src and dest types.
6403       Type *MinVecTy = VectorTy;
6404       if (I->getOpcode() == Instruction::Trunc) {
6405         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6406         VectorTy =
6407             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6408       } else if (I->getOpcode() == Instruction::ZExt ||
6409                  I->getOpcode() == Instruction::SExt) {
6410         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6411         VectorTy =
6412             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6413       }
6414     }
6415 
6416     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6417     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy,
6418                                     CostKind, I);
6419   }
6420   case Instruction::Call: {
6421     bool NeedToScalarize;
6422     CallInst *CI = cast<CallInst>(I);
6423     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6424     if (getVectorIntrinsicIDForCall(CI, TLI))
6425       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6426     return CallCost;
6427   }
6428   default:
6429     // The cost of executing VF copies of the scalar instruction. This opcode
6430     // is unknown. Assume that it is the same as 'mul'.
6431     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
6432                                            CostKind) +
6433            getScalarizationOverhead(I, VF);
6434   } // end of switch.
6435 }
6436 
6437 char LoopVectorize::ID = 0;
6438 
6439 static const char lv_name[] = "Loop Vectorization";
6440 
6441 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6442 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6443 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6444 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6445 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6446 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6447 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6448 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6449 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6450 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6451 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6452 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6453 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6454 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6455 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6456 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6457 
6458 namespace llvm {
6459 
6460 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6461 
6462 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6463                               bool VectorizeOnlyWhenForced) {
6464   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6465 }
6466 
6467 } // end namespace llvm
6468 
6469 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6470   // Check if the pointer operand of a load or store instruction is
6471   // consecutive.
6472   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6473     return Legal->isConsecutivePtr(Ptr);
6474   return false;
6475 }
6476 
6477 void LoopVectorizationCostModel::collectValuesToIgnore() {
6478   // Ignore ephemeral values.
6479   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6480 
6481   // Ignore type-promoting instructions we identified during reduction
6482   // detection.
6483   for (auto &Reduction : Legal->getReductionVars()) {
6484     RecurrenceDescriptor &RedDes = Reduction.second;
6485     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6486     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6487   }
6488   // Ignore type-casting instructions we identified during induction
6489   // detection.
6490   for (auto &Induction : Legal->getInductionVars()) {
6491     InductionDescriptor &IndDes = Induction.second;
6492     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6493     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6494   }
6495 }
6496 
6497 // TODO: we could return a pair of values that specify the max VF and
6498 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6499 // `buildVPlans(VF, VF)`. We cannot do that yet because VPlan currently
6500 // lacks a cost model that can choose which plan to execute when more
6501 // than one is generated.
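// E.g. for 256-bit wide vector registers and a widest scalar type of i32,
// the computed VPlan VF is 256 / 32 = 8.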
6502 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6503                                  LoopVectorizationCostModel &CM) {
6504   unsigned WidestType;
6505   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6506   return WidestVectorRegBits / WidestType;
6507 }
6508 
6509 VectorizationFactor
6510 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6511   unsigned VF = UserVF;
6512   // Outer loop handling: They may require CFG and instruction level
6513   // transformations before even evaluating whether vectorization is profitable.
6514   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6515   // the vectorization pipeline.
6516   if (!OrigLoop->empty()) {
6517     // If the user doesn't provide a vectorization factor, determine a
6518     // reasonable one.
6519     if (!UserVF) {
6520       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector */), CM);
6521       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6522 
6523       // Make sure we have a VF > 1 for stress testing.
6524       if (VPlanBuildStressTest && VF < 2) {
6525         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6526                           << "overriding computed VF.\n");
6527         VF = 4;
6528       }
6529     }
6530     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6531     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6532     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6533                       << " to build VPlans.\n");
6534     buildVPlans(VF, VF);
6535 
6536     // For VPlan build stress testing, we bail out after VPlan construction.
6537     if (VPlanBuildStressTest)
6538       return VectorizationFactor::Disabled();
6539 
6540     return {VF, 0};
6541   }
6542 
6543   LLVM_DEBUG(
6544       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6545                 "VPlan-native path.\n");
6546   return VectorizationFactor::Disabled();
6547 }
6548 
6549 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
6550   assert(OrigLoop->empty() && "Inner loop expected.");
6551   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
6552   if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
6553     return None;
6554 
6555   // Invalidate interleave groups if all blocks of the loop will be predicated.
6556   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6557       !useMaskedInterleavedAccesses(*TTI)) {
6558     LLVM_DEBUG(
6559         dbgs()
6560         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6561            "which requires masked-interleaved support.\n");
6562     if (CM.InterleaveInfo.invalidateGroups())
6563       // Invalidating interleave groups also requires invalidating all decisions
6564       // based on them, which includes widening decisions and uniform and scalar
6565       // values.
6566       CM.invalidateCostModelingDecisions();
6567   }
6568 
6569   if (UserVF) {
6570     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6571     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6572     // Collect the instructions (and their associated costs) that will be more
6573     // profitable to scalarize.
6574     CM.selectUserVectorizationFactor(UserVF);
6575     buildVPlansWithVPRecipes(UserVF, UserVF);
6576     LLVM_DEBUG(printPlans(dbgs()));
6577     return {{UserVF, 0}};
6578   }
6579 
6580   unsigned MaxVF = MaybeMaxVF.getValue();
6581   assert(MaxVF != 0 && "MaxVF is zero.");
6582 
6583   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6584     // Collect Uniform and Scalar instructions after vectorization with VF.
6585     CM.collectUniformsAndScalars(VF);
6586 
6587     // Collect the instructions (and their associated costs) that will be more
6588     // profitable to scalarize.
6589     if (VF > 1)
6590       CM.collectInstsToScalarize(VF);
6591   }
6592 
6593   buildVPlansWithVPRecipes(1, MaxVF);
6594   LLVM_DEBUG(printPlans(dbgs()));
6595   if (MaxVF == 1)
6596     return VectorizationFactor::Disabled();
6597 
6598   // Select the optimal vectorization factor.
6599   return CM.selectVectorizationFactor(MaxVF);
6600 }
6601 
6602 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6603   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6604                     << '\n');
6605   BestVF = VF;
6606   BestUF = UF;
6607 
6608   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6609     return !Plan->hasVF(VF);
6610   });
6611   assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6612 }
6613 
6614 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6615                                            DominatorTree *DT) {
6616   // Perform the actual loop transformation.
6617 
6618   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6619   VPCallbackILV CallbackILV(ILV);
6620 
6621   VPTransformState State{BestVF, BestUF,      LI,
6622                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6623                          &ILV,   CallbackILV};
6624   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6625   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6626   State.CanonicalIV = ILV.Induction;
6627 
6628   //===------------------------------------------------===//
6629   //
6630   // Notice: any optimization or new instruction that goes
6631   // into the code below should also be implemented in
6632   // the cost-model.
6633   //
6634   //===------------------------------------------------===//
6635 
6636   // 2. Copy and widen instructions from the old loop into the new loop.
6637   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6638   VPlans.front()->execute(&State);
6639 
6640   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6641   //    predication, updating analyses.
6642   ILV.fixVectorizedLoop();
6643 }
6644 
6645 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6646     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6647   BasicBlock *Latch = OrigLoop->getLoopLatch();
6648 
6649   // We create new control-flow for the vectorized loop, so the original
6650   // condition will be dead after vectorization if it's only used by the
6651   // branch.
6652   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6653   if (Cmp && Cmp->hasOneUse())
6654     DeadInstructions.insert(Cmp);
6655 
6656   // We create new "steps" for induction variable updates to which the original
6657   // induction variables map. An original update instruction will be dead if
6658   // all its users except the induction variable are dead.
6659   for (auto &Induction : Legal->getInductionVars()) {
6660     PHINode *Ind = Induction.first;
6661     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6662     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6663           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6664                                  DeadInstructions.end();
6665         }))
6666       DeadInstructions.insert(IndUpdate);
6667 
6668     // We also record as "Dead" the type-casting instructions we had identified
6669     // during induction analysis. We don't need any handling for them in the
6670     // vectorized loop because we have proven that, under a proper runtime
6671     // test guarding the vectorized loop, the value of the phi and the casted
6672     // value of the phi are the same. The last instruction in this casting chain
6673     // will get its scalar/vector/widened def from the scalar/vector/widened def
6674     // of the respective phi node. Any other casts in the induction def-use chain
6675     // have no other uses outside the phi update chain, and will be ignored.
6676     InductionDescriptor &IndDes = Induction.second;
6677     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6678     DeadInstructions.insert(Casts.begin(), Casts.end());
6679   }
6680 }
6681 
6682 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6683 
6684 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6685 
6686 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6687                                         Instruction::BinaryOps BinOp) {
6688   // When unrolling and the VF is 1, we only need to add a simple scalar.
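  // For unroll part StartIdx this computes Val <BinOp> StartIdx * Step; the
  // integer case always uses an add.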
6689   Type *Ty = Val->getType();
6690   assert(!Ty->isVectorTy() && "Val must be a scalar");
6691 
6692   if (Ty->isFloatingPointTy()) {
6693     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6694 
6695     // Floating point operations had to be 'fast' to enable the unrolling.
6696     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6697     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6698   }
6699   Constant *C = ConstantInt::get(Ty, StartIdx);
6700   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6701 }
6702 
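// Attach llvm.loop.unroll.runtime.disable to the loop ID metadata of \p L,
// unless unroll metadata is already present. The added node has the usual
// form, e.g.:
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}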
6703 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6704   SmallVector<Metadata *, 4> MDs;
6705   // Reserve first location for self reference to the LoopID metadata node.
6706   MDs.push_back(nullptr);
6707   bool IsUnrollMetadata = false;
6708   MDNode *LoopID = L->getLoopID();
6709   if (LoopID) {
6710     // First find existing loop unrolling disable metadata.
6711     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6712       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6713       if (MD) {
6714         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6715         if (S && S->getString().startswith("llvm.loop.unroll.disable"))
6716           IsUnrollMetadata = true;
6717       }
6718       MDs.push_back(LoopID->getOperand(i));
6719     }
6720   }
6721 
6722   if (!IsUnrollMetadata) {
6723     // Add runtime unroll disable metadata.
6724     LLVMContext &Context = L->getHeader()->getContext();
6725     SmallVector<Metadata *, 1> DisableOperands;
6726     DisableOperands.push_back(
6727         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6728     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6729     MDs.push_back(DisableNode);
6730     MDNode *NewLoopID = MDNode::get(Context, MDs);
6731     // Set operand 0 to refer to the loop id itself.
6732     NewLoopID->replaceOperandWith(0, NewLoopID);
6733     L->setLoopID(NewLoopID);
6734   }
6735 }
6736 
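// Test \p Predicate on \p Range and clamp the range to the longest sub-range
// over which the predicate agrees with its value at Range.Start; return that
// value. E.g. for Range = {2, 16} with the predicate true at VF 2 and 4 but
// false at 8, the range is clamped to {2, 8} and true is returned.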
6737 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6738     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6739   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6740   bool PredicateAtRangeStart = Predicate(Range.Start);
6741 
6742   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6743     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6744       Range.End = TmpVF;
6745       break;
6746     }
6747 
6748   return PredicateAtRangeStart;
6749 }
6750 
6751 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6752 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6753 /// of VF's starting at a given VF and extending it as much as possible. Each
6754 /// vectorization decision can potentially shorten this sub-range during
6755 /// buildVPlan().
6756 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6757   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6758     VFRange SubRange = {VF, MaxVF + 1};
6759     VPlans.push_back(buildVPlan(SubRange));
6760     VF = SubRange.End;
6761   }
6762 }
6763 
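// Compute (and cache) the mask of the edge from \p Src to \p Dst: the mask of
// Src AND'ed with Src's branch condition, negated if Dst is the false
// successor. A null mask represents an all-one mask.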
6764 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6765                                          VPlanPtr &Plan) {
6766   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6767 
6768   // Look for cached value.
6769   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6770   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6771   if (ECEntryIt != EdgeMaskCache.end())
6772     return ECEntryIt->second;
6773 
6774   VPValue *SrcMask = createBlockInMask(Src, Plan);
6775 
6776   // The terminator has to be a branch inst!
6777   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6778   assert(BI && "Unexpected terminator found");
6779 
6780   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6781     return EdgeMaskCache[Edge] = SrcMask;
6782 
6783   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6784   assert(EdgeMask && "No Edge Mask found for condition");
6785 
6786   if (BI->getSuccessor(0) != Dst)
6787     EdgeMask = Builder.createNot(EdgeMask);
6788 
6789   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6790     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6791 
6792   return EdgeMaskCache[Edge] = EdgeMask;
6793 }
6794 
6795 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6796   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6797 
6798   // Look for cached value.
6799   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6800   if (BCEntryIt != BlockMaskCache.end())
6801     return BCEntryIt->second;
6802 
6803   // All-one mask is modelled as no-mask following the convention for masked
6804   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6805   VPValue *BlockMask = nullptr;
6806 
6807   if (OrigLoop->getHeader() == BB) {
6808     if (!CM.blockNeedsPredication(BB))
6809       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6810 
6811     // Introduce the early-exit compare IV <= BTC to form header block mask.
6812     // This is used instead of IV < TC because TC may wrap, unlike BTC.
6813     // Start by constructing the desired canonical IV.
6814     VPValue *IV = nullptr;
6815     if (Legal->getPrimaryInduction())
6816       IV = Plan->getVPValue(Legal->getPrimaryInduction());
6817     else {
6818       auto IVRecipe = new VPWidenCanonicalIVRecipe();
6819       Builder.getInsertBlock()->appendRecipe(IVRecipe);
6820       IV = IVRecipe->getVPValue();
6821     }
6822     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6823     BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6824     return BlockMaskCache[BB] = BlockMask;
6825   }
6826 
6827   // This is the block mask. We OR all incoming edges.
6828   for (auto *Predecessor : predecessors(BB)) {
6829     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6830     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6831       return BlockMaskCache[BB] = EdgeMask;
6832 
6833     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6834       BlockMask = EdgeMask;
6835       continue;
6836     }
6837 
6838     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6839   }
6840 
6841   return BlockMaskCache[BB] = BlockMask;
6842 }
6843 
6844 VPWidenMemoryInstructionRecipe *
6845 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6846                                   VPlanPtr &Plan) {
6847   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6848          "Must be called with either a load or store");
6849 
6850   auto willWiden = [&](unsigned VF) -> bool {
6851     if (VF == 1)
6852       return false;
6853     LoopVectorizationCostModel::InstWidening Decision =
6854         CM.getWideningDecision(I, VF);
6855     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6856            "CM decision should be taken at this point.");
6857     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6858       return true;
6859     if (CM.isScalarAfterVectorization(I, VF) ||
6860         CM.isProfitableToScalarize(I, VF))
6861       return false;
6862     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6863   };
6864 
6865   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6866     return nullptr;
6867 
6868   VPValue *Mask = nullptr;
6869   if (Legal->isMaskRequired(I))
6870     Mask = createBlockInMask(I->getParent(), Plan);
6871 
6872   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6873   if (LoadInst *Load = dyn_cast<LoadInst>(I))
6874     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6875 
6876   StoreInst *Store = cast<StoreInst>(I);
6877   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6878   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6879 }
6880 
6881 VPWidenIntOrFpInductionRecipe *
6882 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
6883   // Check if this is an integer or fp induction. If so, build the recipe that
6884   // produces its scalar and vector values.
6885   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6886   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6887       II.getKind() == InductionDescriptor::IK_FpInduction)
6888     return new VPWidenIntOrFpInductionRecipe(Phi);
6889 
6890   return nullptr;
6891 }
6892 
6893 VPWidenIntOrFpInductionRecipe *
6894 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
6895                                                 VFRange &Range) const {
6896   // Optimize the special case where the source is a constant integer
6897   // induction variable. Notice that we can only optimize the 'trunc' case
6898   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6899   // (c) other casts depend on pointer size.
6900 
6901   // Determine whether \p K is a truncation based on an induction variable that
6902   // can be optimized.
6903   auto isOptimizableIVTruncate =
6904       [&](Instruction *K) -> std::function<bool(unsigned)> {
6905     return
6906         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6907   };
6908 
6909   if (LoopVectorizationPlanner::getDecisionAndClampRange(
6910           isOptimizableIVTruncate(I), Range))
6911     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6912                                              I);
6913   return nullptr;
6914 }
6915 
6916 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
6917   // We know that all PHIs in non-header blocks are converted into selects, so
6918   // we don't have to worry about the insertion order and we can just use the
6919   // builder. At this point we generate the predication tree. There may be
6920   // duplications since this is a simple recursive scan, but future
6921   // optimizations will clean it up.
6922 
6923   SmallVector<VPValue *, 2> Operands;
6924   unsigned NumIncoming = Phi->getNumIncomingValues();
6925   for (unsigned In = 0; In < NumIncoming; In++) {
6926     VPValue *EdgeMask =
6927       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6928     assert((EdgeMask || NumIncoming == 1) &&
6929            "Multiple predecessors with one having a full mask");
6930     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
6931     if (EdgeMask)
6932       Operands.push_back(EdgeMask);
6933   }
6934   return new VPBlendRecipe(Phi, Operands);
6935 }
6936 
6937 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
6938                                                    VPlan &Plan) const {
6940   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6941       [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
6942       Range);
6943 
6944   if (IsPredicated)
6945     return nullptr;
6946 
6947   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6948   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6949              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6950     return nullptr;
6951 
6952   auto willWiden = [&](unsigned VF) -> bool {
6953     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6954     // The following case may be scalarized depending on the VF.
6955     // The flag shows whether an intrinsic or a plain call is used for the
6956     // vectorized version of the instruction, i.e. whether it is beneficial
6957     // to perform the intrinsic call rather than the library call.
6958     bool NeedToScalarize = false;
6959     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
6960     bool UseVectorIntrinsic =
6961         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
6962     return UseVectorIntrinsic || !NeedToScalarize;
6963   };
6964 
6965   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6966     return nullptr;
6967 
6968   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
6969 }
6970 
6971 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6972   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
6973          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
6974   // The instruction should be widened, unless it is scalar after
6975   // vectorization, scalarization is profitable, or it is predicated.
6976   auto WillScalarize = [this, I](unsigned VF) -> bool {
6977     return CM.isScalarAfterVectorization(I, VF) ||
6978            CM.isProfitableToScalarize(I, VF) ||
6979            CM.isScalarWithPredication(I, VF);
6980   };
6981   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
6982                                                              Range);
6983 }
6984 
6985 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
6986   auto IsVectorizableOpcode = [](unsigned Opcode) {
6987     switch (Opcode) {
6988     case Instruction::Add:
6989     case Instruction::And:
6990     case Instruction::AShr:
6991     case Instruction::BitCast:
6992     case Instruction::FAdd:
6993     case Instruction::FCmp:
6994     case Instruction::FDiv:
6995     case Instruction::FMul:
6996     case Instruction::FNeg:
6997     case Instruction::FPExt:
6998     case Instruction::FPToSI:
6999     case Instruction::FPToUI:
7000     case Instruction::FPTrunc:
7001     case Instruction::FRem:
7002     case Instruction::FSub:
7003     case Instruction::ICmp:
7004     case Instruction::IntToPtr:
7005     case Instruction::LShr:
7006     case Instruction::Mul:
7007     case Instruction::Or:
7008     case Instruction::PtrToInt:
7009     case Instruction::SDiv:
7010     case Instruction::Select:
7011     case Instruction::SExt:
7012     case Instruction::Shl:
7013     case Instruction::SIToFP:
7014     case Instruction::SRem:
7015     case Instruction::Sub:
7016     case Instruction::Trunc:
7017     case Instruction::UDiv:
7018     case Instruction::UIToFP:
7019     case Instruction::URem:
7020     case Instruction::Xor:
7021     case Instruction::ZExt:
7022       return true;
7023     }
7024     return false;
7025   };
7026 
7027   if (!IsVectorizableOpcode(I->getOpcode()))
7028     return nullptr;
7029 
7030   // Success: widen this instruction.
7031   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7032 }
7033 
7034 VPBasicBlock *VPRecipeBuilder::handleReplication(
7035     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7036     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7037     VPlanPtr &Plan) {
7038   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7039       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7040       Range);
7041 
7042   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7043       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7044 
7045   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
7046   setRecipe(I, Recipe);
7047 
7048   // Find if I uses a predicated instruction. If so, it will use its scalar
7049   // value. Avoid hoisting the insert-element which packs the scalar value into
7050   // a vector value, as that happens iff all users use the vector value.
7051   for (auto &Op : I->operands())
7052     if (auto *PredInst = dyn_cast<Instruction>(Op))
7053       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7054         PredInst2Recipe[PredInst]->setAlsoPack(false);
7055 
7056   // Finalize the recipe for Instr; handle the non-predicated case first.
7057   if (!IsPredicated) {
7058     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7059     VPBB->appendRecipe(Recipe);
7060     return VPBB;
7061   }
7062   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7063   assert(VPBB->getSuccessors().empty() &&
7064          "VPBB has successors when handling predicated replication.");
7065   // Record predicated instructions for above packing optimizations.
7066   PredInst2Recipe[I] = Recipe;
7067   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7068   VPBlockUtils::insertBlockAfter(Region, VPBB);
7069   auto *RegSucc = new VPBasicBlock();
7070   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7071   return RegSucc;
7072 }
7073 
7074 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7075                                                       VPRecipeBase *PredRecipe,
7076                                                       VPlanPtr &Plan) {
7077   // Instructions marked for predication are replicated and placed under an
7078   // if-then construct to prevent side-effects.
7079 
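  // The region built below forms the following triangle, with Instr
  // replicated in the "if" block and, when Instr produces a value, a phi
  // recipe placed in the "continue" block:
  //
  //             [ pred.<opcode>.entry ]
  //                 |              \
  //                 |      [ pred.<opcode>.if ]
  //                 |              /
  //             [ pred.<opcode>.continue ]
  //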
7080   // Generate recipes to compute the block mask for this region.
7081   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7082 
7083   // Build the triangular if-then region.
7084   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7085   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7086   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7087   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7088   auto *PHIRecipe =
7089       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7090   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7091   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7092   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7093 
7094   // Note: first set Entry as region entry and then connect successors starting
7095   // from it in order, to propagate the "parent" of each VPBasicBlock.
7096   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7097   VPBlockUtils::connectBlocks(Pred, Exit);
7098 
7099   return Region;
7100 }
7101 
7102 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7103                                                       VFRange &Range,
7104                                                       VPlanPtr &Plan) {
7105   // First, check for specific widening recipes that deal with calls, memory
7106   // operations, inductions and Phi nodes.
7107   if (auto *CI = dyn_cast<CallInst>(Instr))
7108     return tryToWidenCall(CI, Range, *Plan);
7109 
7110   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7111     return tryToWidenMemory(Instr, Range, Plan);
7112 
7113   VPRecipeBase *Recipe;
7114   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7115     if (Phi->getParent() != OrigLoop->getHeader())
7116       return tryToBlend(Phi, Plan);
7117     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7118       return Recipe;
7119     return new VPWidenPHIRecipe(Phi);
7121   }
7122 
7123   if (isa<TruncInst>(Instr) &&
7124       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7125     return Recipe;
7126 
7127   if (!shouldWiden(Instr, Range))
7128     return nullptr;
7129 
7130   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7131     return new VPWidenGEPRecipe(GEP, OrigLoop);
7132 
7133   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7134     bool InvariantCond =
7135         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7136     return new VPWidenSelectRecipe(*SI, InvariantCond);
7137   }
7138 
7139   return tryToWiden(Instr, *Plan);
7140 }
7141 
7142 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7143                                                         unsigned MaxVF) {
7144   assert(OrigLoop->empty() && "Inner loop expected.");
7145 
7146   // Collect conditions feeding internal conditional branches; they need to be
7147   // represented in VPlan for it to model masking.
7148   SmallPtrSet<Value *, 1> NeedDef;
7149 
7150   auto *Latch = OrigLoop->getLoopLatch();
7151   for (BasicBlock *BB : OrigLoop->blocks()) {
7152     if (BB == Latch)
7153       continue;
7154     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7155     if (Branch && Branch->isConditional())
7156       NeedDef.insert(Branch->getCondition());
7157   }
7158 
7159   // If the tail is to be folded by masking, the primary induction variable, if
7160   // it exists, needs to be represented in VPlan for it to model early-exit masking.
7161   // Also, both the Phi and the live-out instruction of each reduction are
7162   // required in order to introduce a select between them in VPlan.
7163   if (CM.foldTailByMasking()) {
7164     if (Legal->getPrimaryInduction())
7165       NeedDef.insert(Legal->getPrimaryInduction());
7166     for (auto &Reduction : Legal->getReductionVars()) {
7167       NeedDef.insert(Reduction.first);
7168       NeedDef.insert(Reduction.second.getLoopExitInstr());
7169     }
7170   }
7171 
7172   // Collect instructions from the original loop that will become trivially dead
7173   // in the vectorized loop. We don't need to vectorize these instructions. For
7174   // example, original induction update instructions can become dead because we
7175   // separately emit induction "steps" when generating code for the new loop.
7176   // Similarly, we create a new latch condition when setting up the structure
7177   // of the new loop, so the old one can become dead.
7178   SmallPtrSet<Instruction *, 4> DeadInstructions;
7179   collectTriviallyDeadInstructions(DeadInstructions);
7180 
7181   // Add assume instructions we need to drop to DeadInstructions, to prevent
7182   // them from being added to the VPlan.
7183   // TODO: We only need to drop assumes in blocks that get flattened. If the
7184   // control flow is preserved, we should keep them.
7185   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7186   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7187 
7188   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7189   // Dead instructions do not need sinking. Remove them from SinkAfter.
7190   for (Instruction *I : DeadInstructions)
7191     SinkAfter.erase(I);
7192 
7193   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7194     VFRange SubRange = {VF, MaxVF + 1};
7195     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7196                                              DeadInstructions, SinkAfter));
7197     VF = SubRange.End;
7198   }
7199 }
7200 
7201 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7202     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7203     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7204     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7205 
7206   // Hold a mapping from predicated instructions to their recipes, in order to
7207   // fix their AlsoPack behavior if a user is determined to replicate and use a
7208   // scalar instead of a vector value.
7209   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7210 
7211   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7212 
7213   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7214 
7215   // ---------------------------------------------------------------------------
7216   // Pre-construction: record ingredients whose recipes we'll need to further
7217   // process after constructing the initial VPlan.
7218   // ---------------------------------------------------------------------------
7219 
7220   // Mark instructions we'll need to sink later and their targets as
7221   // ingredients whose recipe we'll need to record.
7222   for (auto &Entry : SinkAfter) {
7223     RecipeBuilder.recordRecipeOf(Entry.first);
7224     RecipeBuilder.recordRecipeOf(Entry.second);
7225   }
7226 
7227   // For each interleave group which is relevant for this (possibly trimmed)
7228   // Range, add it to the set of groups to be later applied to the VPlan and add
7229   // placeholders for its members' Recipes which we'll be replacing with a
7230   // single VPInterleaveRecipe.
7231   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7232     auto applyIG = [IG, this](unsigned VF) -> bool {
7233       return (VF >= 2 && // Query is illegal for VF == 1
7234               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7235                   LoopVectorizationCostModel::CM_Interleave);
7236     };
7237     if (!getDecisionAndClampRange(applyIG, Range))
7238       continue;
7239     InterleaveGroups.insert(IG);
7240     for (unsigned i = 0; i < IG->getFactor(); i++)
7241       if (Instruction *Member = IG->getMember(i))
7242         RecipeBuilder.recordRecipeOf(Member);
7243   }
7244 
7245   // ---------------------------------------------------------------------------
7246   // Build initial VPlan: Scan the body of the loop in a topological order to
7247   // visit each basic block after having visited its predecessor basic blocks.
7248   // ---------------------------------------------------------------------------
7249 
7250   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7251   auto Plan = std::make_unique<VPlan>();
7252   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7253   Plan->setEntry(VPBB);
7254 
7255   // Represent values that will have defs inside VPlan.
7256   for (Value *V : NeedDef)
7257     Plan->addVPValue(V);
7258 
7259   // Scan the body of the loop in a topological order to visit each basic block
7260   // after having visited its predecessor basic blocks.
7261   LoopBlocksDFS DFS(OrigLoop);
7262   DFS.perform(LI);
7263 
7264   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7265     // Relevant instructions from basic block BB will be grouped into VPRecipe
7266     // ingredients and fill a new VPBasicBlock.
7267     unsigned VPBBsForBB = 0;
7268     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7269     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7270     VPBB = FirstVPBBForBB;
7271     Builder.setInsertPoint(VPBB);
7272 
7273     // Introduce each ingredient into VPlan.
7274     // TODO: Model and preserve debug intrinsics in VPlan.
7275     for (Instruction &I : BB->instructionsWithoutDebug()) {
7276       Instruction *Instr = &I;
7277 
7278       // First filter out irrelevant instructions, to ensure no recipes are
7279       // built for them.
7280       if (isa<BranchInst>(Instr) ||
7281           DeadInstructions.find(Instr) != DeadInstructions.end())
7282         continue;
7283 
7284       if (auto Recipe =
7285               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7286         RecipeBuilder.setRecipe(Instr, Recipe);
7287         VPBB->appendRecipe(Recipe);
7288         continue;
7289       }
7290 
7291       // Otherwise, if all widening options failed, the instruction is to be
7292       // replicated. This may create a successor for VPBB.
7293       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7294           Instr, Range, VPBB, PredInst2Recipe, Plan);
7295       if (NextVPBB != VPBB) {
7296         VPBB = NextVPBB;
7297         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7298                                     : "");
7299       }
7300     }
7301   }
7302 
7303   // Discard the empty dummy pre-entry VPBasicBlock. Note that other
7304   // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
7305   // original basic blocks with no recipes.
7306   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7307   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7308   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7309   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7310   delete PreEntry;
7311 
7312   // ---------------------------------------------------------------------------
7313   // Transform initial VPlan: Apply previously taken decisions, in order, to
7314   // bring the VPlan to its final state.
7315   // ---------------------------------------------------------------------------
7316 
7317   // Apply Sink-After legal constraints.
7318   for (auto &Entry : SinkAfter) {
7319     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7320     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7321     Sink->moveAfter(Target);
7322   }
7323 
7324   // Interleave memory: for each Interleave Group we marked earlier as relevant
7325   // for this VPlan, replace the Recipes widening its memory instructions with a
7326   // single VPInterleaveRecipe at its insertion point.
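  // (Illustrative example: for a group of two interleaved loads, a single
  // VPInterleaveRecipe is inserted before the recipe of the group's insert
  // position, and the two individual widened-load recipes are erased below.)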
  for (auto IG : InterleaveGroups) {
    auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
        RecipeBuilder.getRecipe(IG->getInsertPos()));
    (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
        ->insertBefore(Recipe);

    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.getRecipe(Member)->eraseFromParent();
  }

  // Finally, if the tail is folded by masking, introduce selects between the
  // phi and the live-out instruction of each reduction, at the end of the
  // latch.
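  // For each reduction this emits, in VPlan terms, the equivalent of
  //   %sel = select %header.mask, %red.loop.exit.value, %red.phi
  // (illustrative names), so masked-off lanes keep the phi's value.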
  if (CM.foldTailByMasking()) {
    Builder.setInsertPoint(VPBB);
    auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
    for (auto &Reduction : Legal->getReductionVars()) {
      VPValue *Phi = Plan->getVPValue(Reduction.first);
      VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
      Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
    }
  }

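  // Name the plan after the VFs it covers, e.g. (illustrative) for a Range of
  // [4, 16) the name becomes "Initial VPlan for VF={4,8},UF>=1".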
  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  unsigned VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; VF < Range.End; VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  assert(!OrigLoop->empty() && "Expecting an outer loop.");
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running the transformation to recipes until masked code
    // generation in the VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
  return Plan;
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
    Value *V, const VPIteration &Instance) {
  return ILV.getOrCreateScalarValue(V, Instance);
}

void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  if (VPValue *Mask = getMask()) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
}

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(Ingredient, User, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(Ingredient, InvariantCond);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(Ingredient, User, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, Trunc);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(State.Builder, Phi);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi,
  // and which are essentially undef, take their value from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single-edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
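  // (E.g., with UF=2 and VF=4, instances {Part,Lane} = {0,0}..{0,3} and
  // {1,0}..{1,3} are generated; a uniform instruction gets only {0,0} and
  // {1,0}.)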
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // The block's in-mask is all-one: branch unconditionally.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional branch,
  // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed. In this case the recipe of the predicated instruction is marked to
  // also do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
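  // (Illustrative shape of the vector case handled below:
  //   %vphi = phi <VF x T> [ %vec.before.insert, %PredicatingBB ],
  //                        [ %insert.element,    %PredicatedBB ]
  // where %insert.element is the insertelement produced by the packing.)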
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
  State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
                                        getMask());
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) compiler options that request predication, 3) loop
// hints forcing predication, and 4) a TTI hook that analyses whether the loop
// is suitable for predication.
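// (Illustrative precedence, mirroring the checks below: optimising for size
// without a forcing hint wins first and forbids a scalar epilogue; an explicit
// request to disable predication wins next and allows one; only then can the
// option, hint or TTI hook request predication instead of an epilogue.)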
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  bool OptSize =
      F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                     PGSOQueryType::IRPass);
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
    return CM_ScalarEpilogueNotAllowedOptSize;

  bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
                              !PreferPredicateOverEpilog;

  // 2) Next, if disabling predication is requested on the command line, honour
  // this and request a scalar epilogue.
  if (PredicateOptDisabled)
    return CM_ScalarEpilogueAllowed;

  // 3) and 4) Finally, if predication is requested on the command line, via a
  // loop hint, or if the TTI hook indicates it is profitable, request
  // predication.
  if (PreferPredicateOverEpilog ||
      Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
      (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
                                        LVL.getLAI()) &&
       Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {

  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);

  // Get user vectorization factor.
  const unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " unroll=" << Hints.getInterleave() << "\n");

  // The function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints);

  assert(L->empty() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;
  unsigned UserIC = Hints.getInterleave();

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
                         "disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
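  // (E.g., interleaving a scalar reduction loop by two yields two independent
  // accumulator chains, improving ILP without touching vector registers.)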
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}