1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
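//
// For example, a scalar loop such as (an illustrative C-style sketch, not code
// from this file):
//
//   for (i = 0; i < n; i++)
//     a[i] = b[i] + c[i];
//
// is conceptually turned, for a vector width of 4, into:
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3];   // one 'wide' iteration
//   for (; i < n; i++)                  // scalar remainder (epilogue)
//     a[i] = b[i] + c[i];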
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is an ongoing development effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD.
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
202 // that predication is preferred; the enum below lists the options. I.e., the
203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
204 // and predicate the instructions accordingly. If tail-folding fails, there are
205 // different fallback strategies depending on these values:
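//
// For example (illustrative only): with tail-folding, instead of a vector loop
// followed by a scalar remainder loop, a single vector loop is emitted whose
// final iteration runs under a mask, conceptually:
//
//   for (i = 0; i < n; i += 4)
//     mask = {i, i+1, i+2, i+3} < n
//     if (mask[j]) a[i+j] = b[i+j] + c[i+j]   // for each lane j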
206 namespace PreferPredicateTy {
207   enum Option {
208     ScalarEpilogue = 0,
209     PredicateElseScalarEpilogue,
210     PredicateOrDontVectorize
211   };
212 } // namespace PreferPredicateTy
213 
214 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
215     "prefer-predicate-over-epilogue",
216     cl::init(PreferPredicateTy::ScalarEpilogue),
217     cl::Hidden,
218     cl::desc("Tail-folding and predication preferences over creating a scalar "
219              "epilogue loop."),
220     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
221                          "scalar-epilogue",
222                          "Don't tail-predicate loops, create scalar epilogue"),
223               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
224                          "predicate-else-scalar-epilogue",
225                          "prefer tail-folding, create scalar epilogue if tail "
226                          "folding fails."),
227               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
228                          "predicate-dont-vectorize",
229                          "prefer tail-folding, don't attempt vectorization if "
230                          "tail-folding fails.")));
231 
232 static cl::opt<bool> MaximizeBandwidth(
233     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
234     cl::desc("Maximize bandwidth when selecting vectorization factor which "
235              "will be determined by the smallest type in the loop."));
236 
237 static cl::opt<bool> EnableInterleavedMemAccesses(
238     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
239     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
240 
241 /// An interleave-group may need masking if it resides in a block that needs
242 /// predication, or in order to mask away gaps.
243 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
244     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
246 
247 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
248     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
249     cl::desc("We don't interleave loops with an estimated constant trip count "
250              "below this number"));
251 
252 static cl::opt<unsigned> ForceTargetNumScalarRegs(
253     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
254     cl::desc("A flag that overrides the target's number of scalar registers."));
255 
256 static cl::opt<unsigned> ForceTargetNumVectorRegs(
257     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of vector registers."));
259 
260 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
261     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's max interleave factor for "
263              "scalar loops."));
264 
265 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
266     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "vectorized loops."));
269 
270 static cl::opt<unsigned> ForceTargetInstructionCost(
271     "force-target-instruction-cost", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's expected cost for "
273              "an instruction to a single constant value. Mostly "
274              "useful for getting consistent testing."));
275 
276 static cl::opt<bool> ForceTargetSupportsScalableVectors(
277     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
278     cl::desc(
279         "Pretend that scalable vectors are supported, even if the target does "
280         "not support them. This flag should only be used for testing."));
281 
282 static cl::opt<unsigned> SmallLoopCost(
283     "small-loop-cost", cl::init(20), cl::Hidden,
284     cl::desc(
285         "The cost of a loop that is considered 'small' by the interleaver."));
286 
287 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
288     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
289     cl::desc("Enable the use of the block frequency analysis to access PGO "
290              "heuristics minimizing code growth in cold regions and being more "
291              "aggressive in hot regions."));
292 
293 // Runtime interleave loops for load/store throughput.
294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
295     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
296     cl::desc(
297         "Enable runtime interleaving until load/store ports are saturated"));
298 
299 /// Interleave small loops with scalar reductions.
300 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
301     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
302     cl::desc("Enable interleaving for loops with small iteration counts that "
303              "contain scalar reductions to expose ILP."));
304 
305 /// The number of stores in a loop that are allowed to need predication.
306 static cl::opt<unsigned> NumberOfStoresToPredicate(
307     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
308     cl::desc("Max number of stores to be predicated behind an if."));
309 
310 static cl::opt<bool> EnableIndVarRegisterHeur(
311     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
312     cl::desc("Count the induction variable only once when interleaving"));
313 
314 static cl::opt<bool> EnableCondStoresVectorization(
315     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
316     cl::desc("Enable if predication of stores during vectorization."));
317 
318 static cl::opt<unsigned> MaxNestedScalarReductionIC(
319     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
320     cl::desc("The maximum interleave count to use when interleaving a scalar "
321              "reduction in a nested loop."));
322 
323 static cl::opt<bool>
324     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
325                            cl::Hidden,
326                            cl::desc("Prefer in-loop vector reductions, "
327                                     "overriding the target's preference."));
328 
329 static cl::opt<bool> PreferPredicatedReductionSelect(
330     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
331     cl::desc(
332         "Prefer predicating a reduction operation over an after-loop select."));
333 
334 cl::opt<bool> EnableVPlanNativePath(
335     "enable-vplan-native-path", cl::init(false), cl::Hidden,
336     cl::desc("Enable VPlan-native vectorization path with "
337              "support for outer loop vectorization."));
338 
339 // FIXME: Remove this switch once we have divergence analysis. Currently we
340 // assume divergent non-backedge branches when this switch is true.
341 cl::opt<bool> EnableVPlanPredication(
342     "enable-vplan-predication", cl::init(false), cl::Hidden,
343     cl::desc("Enable VPlan-native vectorization path predicator with "
344              "support for outer loop vectorization."));
345 
346 // This flag enables the stress testing of the VPlan H-CFG construction in the
347 // VPlan-native vectorization path. It must be used in conjunction with
348 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
349 // verification of the H-CFGs built.
350 static cl::opt<bool> VPlanBuildStressTest(
351     "vplan-build-stress-test", cl::init(false), cl::Hidden,
352     cl::desc(
353         "Build VPlan for every supported loop nest in the function and bail "
354         "out right after the build (stress test the VPlan H-CFG construction "
355         "in the VPlan-native vectorization path)."));
356 
357 cl::opt<bool> llvm::EnableLoopInterleaving(
358     "interleave-loops", cl::init(true), cl::Hidden,
359     cl::desc("Enable loop interleaving in Loop vectorization passes"));
360 cl::opt<bool> llvm::EnableLoopVectorization(
361     "vectorize-loops", cl::init(true), cl::Hidden,
362     cl::desc("Run the Loop vectorization passes"));
363 
364 /// A helper function that returns the type of a loaded or stored value.
365 static Type *getMemInstValueType(Value *I) {
366   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
367          "Expected Load or Store instruction");
368   if (auto *LI = dyn_cast<LoadInst>(I))
369     return LI->getType();
370   return cast<StoreInst>(I)->getValueOperand()->getType();
371 }
372 
373 /// A helper function that returns true if the given type is irregular. The
374 /// type is irregular if its allocated size doesn't equal the store size of an
375 /// element of the corresponding vector type at the given vectorization factor.
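/// For example (illustrative, assuming a typical x86-64 data layout), x86_fp80
/// has a store size of 80 bits but an alloc size of 128 bits, so an array of
/// x86_fp80 is not bitcast-compatible with a vector of x86_fp80 and the type
/// is treated as irregular.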
376 static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
377   // Determine if an array of VF elements of type Ty is "bitcast compatible"
378   // with a <VF x Ty> vector.
379   if (VF.isVector()) {
380     auto *VectorTy = VectorType::get(Ty, VF);
381     return TypeSize::get(VF.getKnownMinValue() *
382                              DL.getTypeAllocSize(Ty).getFixedValue(),
383                          VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
384   }
385 
386   // If the vectorization factor is one, we just check if an array of type Ty
387   // requires padding between elements.
388   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
389 }
390 
391 /// A helper function that returns the reciprocal of the block probability of
392 /// predicated blocks. If we return X, we are assuming the predicated block
393 /// will execute once for every X iterations of the loop header.
394 ///
395 /// TODO: We should use actual block probability here, if available. Currently,
396 ///       we always assume predicated blocks have a 50% chance of executing.
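/// For example, the current return value of 2 encodes that 50% assumption:
/// callers can divide a predicated block's cost by this value to estimate its
/// expected contribution per iteration of the loop header (illustrative of the
/// intended use).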
397 static unsigned getReciprocalPredBlockProb() { return 2; }
398 
399 /// A helper function that adds a 'fast' flag to floating-point operations.
400 static Value *addFastMathFlag(Value *V) {
401   if (isa<FPMathOperator>(V))
402     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
403   return V;
404 }
405 
406 /// A helper function that returns an integer or floating-point constant with
407 /// value C.
408 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
409   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
410                            : ConstantFP::get(Ty, C);
411 }
412 
413 /// Returns "best known" trip count for the specified loop \p L as defined by
414 /// the following procedure:
415 ///   1) Returns exact trip count if it is known.
416 ///   2) Returns expected trip count according to profile data if any.
417 ///   3) Returns upper bound estimate if it is known.
418 ///   4) Returns None if all of the above failed.
419 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
420   // Check if exact trip count is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
422     return ExpectedTC;
423 
424   // Check if there is an expected trip count available from profile data.
425   if (LoopVectorizeWithBlockFrequency)
426     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
427       return EstimatedTC;
428 
429   // Check if upper bound estimate is known.
430   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
431     return ExpectedTC;
432 
433   return None;
434 }
435 
436 namespace llvm {
437 
438 /// InnerLoopVectorizer vectorizes loops which contain only one basic
439 /// block to a specified vectorization factor (VF).
440 /// This class performs the widening of scalars into vectors, or multiple
441 /// scalars. This class also implements the following features:
442 /// * It inserts an epilogue loop for handling loops that don't have iteration
443 ///   counts that are known to be a multiple of the vectorization factor.
444 /// * It handles the code generation for reduction variables.
445 /// * Scalarization (implementation using scalars) of un-vectorizable
446 ///   instructions.
447 /// InnerLoopVectorizer does not perform any vectorization-legality
448 /// checks, and relies on the caller to check for the different legality
449 /// aspects. The InnerLoopVectorizer relies on the
450 /// LoopVectorizationLegality class to provide information about the induction
451 /// and reduction variables that were found, for a given vectorization factor.
452 class InnerLoopVectorizer {
453 public:
454   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
455                       LoopInfo *LI, DominatorTree *DT,
456                       const TargetLibraryInfo *TLI,
457                       const TargetTransformInfo *TTI, AssumptionCache *AC,
458                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
459                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
460                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
461                       ProfileSummaryInfo *PSI)
462       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
463         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
464         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
465         PSI(PSI) {
466     // Query this against the original loop and save it here because the profile
467     // of the original loop header may change as the transformation happens.
468     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
469         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
470   }
471 
472   virtual ~InnerLoopVectorizer() = default;
473 
474   /// Create a new empty loop that will contain vectorized instructions later
475   /// on, while the old loop will be used as the scalar remainder. Control flow
476   /// is generated around the vectorized (and scalar epilogue) loops consisting
477   /// of various checks and bypasses. Return the pre-header block of the new
478   /// loop.
479   /// In the case of epilogue vectorization, this function is overridden to
480   /// handle the more complex control flow around the loops.
481   virtual BasicBlock *createVectorizedLoopSkeleton();
482 
483   /// Widen a single instruction within the innermost loop.
484   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
485                         VPTransformState &State);
486 
487   /// Widen a single call instruction within the innermost loop.
488   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
489                             VPTransformState &State);
490 
491   /// Widen a single select instruction within the innermost loop.
492   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
493                               bool InvariantCond, VPTransformState &State);
494 
495   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
496   void fixVectorizedLoop(VPTransformState &State);
497 
498   // Return true if any runtime check is added.
499   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
500 
501   /// A type for vectorized values in the new loop. Each value from the
502   /// original loop, when vectorized, is represented by UF vector values in the
503   /// new unrolled loop, where UF is the unroll factor.
504   using VectorParts = SmallVector<Value *, 2>;
505 
506   /// Vectorize a single GetElementPtrInst based on information gathered and
507   /// decisions taken during planning.
508   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
509                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
510                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
511 
512   /// Vectorize a single PHINode in a block. This method handles the induction
513   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
514   /// arbitrary length vectors.
515   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
516                            Value *StartV, VPValue *Def,
517                            VPTransformState &State);
518 
519   /// A helper function to scalarize a single Instruction in the innermost loop.
520   /// Generates a sequence of scalar instances for each lane between \p MinLane
521   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
522   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
523   /// Instr's operands.
524   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
525                             const VPIteration &Instance, bool IfPredicateInstr,
526                             VPTransformState &State);
527 
528   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
529   /// is provided, the integer induction variable will first be truncated to
530   /// the corresponding type.
531   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
532                              VPValue *Def, VPValue *CastDef,
533                              VPTransformState &State);
534 
535   /// Construct the vector value of a scalarized value \p V one lane at a time.
536   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
537                                  VPTransformState &State);
538 
539   /// Try to vectorize interleaved access group \p Group with the base address
540   /// given in \p Addr, optionally masking the vector operations if \p
541   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
542   /// values in the vectorized loop.
543   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
544                                 ArrayRef<VPValue *> VPDefs,
545                                 VPTransformState &State, VPValue *Addr,
546                                 ArrayRef<VPValue *> StoredValues,
547                                 VPValue *BlockInMask = nullptr);
548 
549   /// Vectorize Load and Store instructions with the base address given in \p
550   /// Addr, optionally masking the vector operations if \p BlockInMask is
551   /// non-null. Use \p State to translate given VPValues to IR values in the
552   /// vectorized loop.
553   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
554                                   VPValue *Def, VPValue *Addr,
555                                   VPValue *StoredValue, VPValue *BlockInMask);
556 
557   /// Set the debug location in the builder using the debug location in
558   /// the instruction.
559   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
560 
561   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
562   void fixNonInductionPHIs(VPTransformState &State);
563 
564   /// Create a broadcast instruction. This method generates a broadcast
565   /// instruction (shuffle) for loop invariant values and for the induction
566   /// value. If this is the induction variable then we extend it to N, N+1, ...
567   /// this is needed because each iteration in the loop corresponds to a SIMD
568   /// element.
569   virtual Value *getBroadcastInstrs(Value *V);
570 
571 protected:
572   friend class LoopVectorizationPlanner;
573 
574   /// A small list of PHINodes.
575   using PhiVector = SmallVector<PHINode *, 4>;
576 
577   /// A type for scalarized values in the new loop. Each value from the
578   /// original loop, when scalarized, is represented by UF x VF scalar values
579   /// in the new unrolled loop, where UF is the unroll factor and VF is the
580   /// vectorization factor.
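  /// For example (illustrative), with UF = 2 and VF = 4 a single scalarized
  /// value is held as 2 groups of 4 scalar Values, one group per unrolled part
  /// and one Value per lane.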
581   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
582 
583   /// Set up the values of the IVs correctly when exiting the vector loop.
584   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
585                     Value *CountRoundDown, Value *EndValue,
586                     BasicBlock *MiddleBlock);
587 
588   /// Create a new induction variable inside L.
589   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
590                                    Value *Step, Instruction *DL);
591 
592   /// Handle all cross-iteration phis in the header.
593   void fixCrossIterationPHIs(VPTransformState &State);
594 
595   /// Fix a first-order recurrence. This is the second phase of vectorizing
596   /// this phi node.
597   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
598 
599   /// Fix a reduction cross-iteration phi. This is the second phase of
600   /// vectorizing this phi node.
601   void fixReduction(PHINode *Phi, VPTransformState &State);
602 
603   /// Clear NSW/NUW flags from reduction instructions if necessary.
604   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
605                                VPTransformState &State);
606 
607   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
608   /// means we need to add the appropriate incoming value from the middle
609   /// block as exiting edges from the scalar epilogue loop (if present) are
610   /// already in place, and we exit the vector loop exclusively to the middle
611   /// block.
612   void fixLCSSAPHIs(VPTransformState &State);
613 
614   /// Iteratively sink the scalarized operands of a predicated instruction into
615   /// the block that was created for it.
616   void sinkScalarOperands(Instruction *PredInst);
617 
618   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
619   /// represented as.
620   void truncateToMinimalBitwidths(VPTransformState &State);
621 
622   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
623   /// to each vector element of Val. The sequence starts at StartIdx.
624   /// \p Opcode is relevant for FP induction variable.
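  /// For example (illustrative), with StartIdx = 0, Step = 1 and
  /// Val = <%x, %x, %x, %x>, the result is <%x, %x + 1, %x + 2, %x + 3>.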
625   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
626                                Instruction::BinaryOps Opcode =
627                                Instruction::BinaryOpsEnd);
628 
629   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
630   /// variable on which to base the steps, \p Step is the size of the step, and
631   /// \p EntryVal is the value from the original loop that maps to the steps.
632   /// Note that \p EntryVal doesn't have to be an induction variable - it
633   /// can also be a truncate instruction.
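  /// For example (illustrative), with ScalarIV = %i, Step = 2 and VF = 4, the
  /// scalar steps for one unrolled part are %i, %i + 2, %i + 4 and %i + 6.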
634   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
635                         const InductionDescriptor &ID, VPValue *Def,
636                         VPValue *CastDef, VPTransformState &State);
637 
638   /// Create a vector induction phi node based on an existing scalar one. \p
639   /// EntryVal is the value from the original loop that maps to the vector phi
640   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
641   /// truncate instruction, instead of widening the original IV, we widen a
642   /// version of the IV truncated to \p EntryVal's type.
643   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
644                                        Value *Step, Value *Start,
645                                        Instruction *EntryVal, VPValue *Def,
646                                        VPValue *CastDef,
647                                        VPTransformState &State);
648 
649   /// Returns true if an instruction \p I should be scalarized instead of
650   /// vectorized for the chosen vectorization factor.
651   bool shouldScalarizeInstruction(Instruction *I) const;
652 
653   /// Returns true if we should generate a scalar version of \p IV.
654   bool needsScalarInduction(Instruction *IV) const;
655 
656   /// If there is a cast involved in the induction variable \p ID, which should
657   /// be ignored in the vectorized loop body, this function records the
658   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
659   /// cast. We had already proved that the casted Phi is equal to the uncasted
660   /// Phi in the vectorized loop (under a runtime guard), and therefore
661   /// there is no need to vectorize the cast - the same value can be used in the
662   /// vector loop for both the Phi and the cast.
663   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
664   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
665   ///
666   /// \p EntryVal is the value from the original loop that maps to the vector
667   /// phi node and is used to distinguish what is the IV currently being
668   /// processed - original one (if \p EntryVal is a phi corresponding to the
669   /// original IV) or the "newly-created" one based on the proof mentioned above
670   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
671   /// latter case \p EntryVal is a TruncInst and we must not record anything for
672   /// that IV, but it's error-prone to expect callers of this routine to care
673   /// about that, hence this explicit parameter.
674   void recordVectorLoopValueForInductionCast(
675       const InductionDescriptor &ID, const Instruction *EntryVal,
676       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
677       unsigned Part, unsigned Lane = UINT_MAX);
678 
679   /// Generate a shuffle sequence that will reverse the vector Vec.
680   virtual Value *reverseVector(Value *Vec);
681 
682   /// Returns (and creates if needed) the original loop trip count.
683   Value *getOrCreateTripCount(Loop *NewLoop);
684 
685   /// Returns (and creates if needed) the trip count of the widened loop.
686   Value *getOrCreateVectorTripCount(Loop *NewLoop);
687 
688   /// Returns a bitcasted value to the requested vector type.
689   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
690   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
691                                 const DataLayout &DL);
692 
693   /// Emit a bypass check to see if the vector trip count is zero, including if
694   /// it overflows.
695   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
696 
697   /// Emit a bypass check to see if all of the SCEV assumptions we've
698   /// had to make are correct.
699   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
700 
701   /// Emit bypass checks to check any memory assumptions we may have made.
702   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
703 
704   /// Compute the transformed value of Index at offset StartValue using step
705   /// StepValue.
706   /// For integer induction, returns StartValue + Index * StepValue.
707   /// For pointer induction, returns StartValue[Index * StepValue].
708   /// FIXME: The newly created binary instructions should contain nsw/nuw
709   /// flags, which can be found from the original scalar operations.
710   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
711                               const DataLayout &DL,
712                               const InductionDescriptor &ID) const;
713 
714   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
715   /// vector loop preheader, middle block and scalar preheader. Also
716   /// allocate a loop object for the new vector loop and return it.
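  ///
  /// A rough sketch of the resulting control flow (illustrative; any runtime
  /// check blocks emitted in between are omitted):
  ///
  ///   iteration check -> vector preheader -> vector loop -> middle block
  ///   iteration check -> scalar preheader            (too few iterations)
  ///   middle block    -> exit block                  (no iterations left)
  ///   middle block    -> scalar preheader -> scalar loop -> exit block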
717   Loop *createVectorLoopSkeleton(StringRef Prefix);
718 
719   /// Create new phi nodes for the induction variables to resume iteration count
720   /// in the scalar epilogue, from where the vectorized loop left off (given by
721   /// \p VectorTripCount).
722   /// In cases where the loop skeleton is more complicated (e.g. epilogue
723   /// vectorization) and the resume values can come from an additional bypass
724   /// block, the \p AdditionalBypass pair provides information about the bypass
725   /// block and the end value on the edge from bypass to this loop.
726   void createInductionResumeValues(
727       Loop *L, Value *VectorTripCount,
728       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
729 
730   /// Complete the loop skeleton by adding debug MDs, creating appropriate
731   /// conditional branches in the middle block, preparing the builder and
732   /// running the verifier. Take in the vector loop \p L as argument, and return
733   /// the preheader of the completed vector loop.
734   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
735 
736   /// Add additional metadata to \p To that was not present on \p Orig.
737   ///
738   /// Currently this is used to add the noalias annotations based on the
739   /// inserted memchecks.  Use this for instructions that are *cloned* into the
740   /// vector loop.
741   void addNewMetadata(Instruction *To, const Instruction *Orig);
742 
743   /// Add metadata from one instruction to another.
744   ///
745   /// This includes both the original MDs from \p From and additional ones (\see
746   /// addNewMetadata).  Use this for *newly created* instructions in the vector
747   /// loop.
748   void addMetadata(Instruction *To, Instruction *From);
749 
750   /// Similar to the previous function but it adds the metadata to a
751   /// vector of instructions.
752   void addMetadata(ArrayRef<Value *> To, Instruction *From);
753 
754   /// Allow subclasses to override and print debug traces before/after vplan
755   /// execution, when trace information is requested.
756   virtual void printDebugTracesAtStart() {}
757   virtual void printDebugTracesAtEnd() {}
758 
759   /// The original loop.
760   Loop *OrigLoop;
761 
762   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
763   /// dynamic knowledge to simplify SCEV expressions and converts them to a
764   /// more usable form.
765   PredicatedScalarEvolution &PSE;
766 
767   /// Loop Info.
768   LoopInfo *LI;
769 
770   /// Dominator Tree.
771   DominatorTree *DT;
772 
773   /// Alias Analysis.
774   AAResults *AA;
775 
776   /// Target Library Info.
777   const TargetLibraryInfo *TLI;
778 
779   /// Target Transform Info.
780   const TargetTransformInfo *TTI;
781 
782   /// Assumption Cache.
783   AssumptionCache *AC;
784 
785   /// Interface to emit optimization remarks.
786   OptimizationRemarkEmitter *ORE;
787 
788   /// LoopVersioning.  It's only set up (non-null) if memchecks were
789   /// used.
790   ///
791   /// This is currently only used to add no-alias metadata based on the
792   /// memchecks.  The actual versioning is performed manually.
793   std::unique_ptr<LoopVersioning> LVer;
794 
795   /// The vectorization SIMD factor to use. Each vector will have this many
796   /// vector elements.
797   ElementCount VF;
798 
799   /// The vectorization unroll factor to use. Each scalar is vectorized to this
800   /// many different vector instructions.
801   unsigned UF;
802 
803   /// The builder that we use
804   IRBuilder<> Builder;
805 
806   // --- Vectorization state ---
807 
808   /// The vector-loop preheader.
809   BasicBlock *LoopVectorPreHeader;
810 
811   /// The scalar-loop preheader.
812   BasicBlock *LoopScalarPreHeader;
813 
814   /// Middle Block between the vector and the scalar.
815   BasicBlock *LoopMiddleBlock;
816 
817   /// The (unique) ExitBlock of the scalar loop.  Note that
818   /// there can be multiple exiting edges reaching this block.
819   BasicBlock *LoopExitBlock;
820 
821   /// The vector loop body.
822   BasicBlock *LoopVectorBody;
823 
824   /// The scalar loop body.
825   BasicBlock *LoopScalarBody;
826 
827   /// A list of all bypass blocks. The first block is the entry of the loop.
828   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
829 
830   /// The new Induction variable which was added to the new block.
831   PHINode *Induction = nullptr;
832 
833   /// The induction variable of the old basic block.
834   PHINode *OldInduction = nullptr;
835 
836   /// Store instructions that were predicated.
837   SmallVector<Instruction *, 4> PredicatedInstructions;
838 
839   /// Trip count of the original loop.
840   Value *TripCount = nullptr;
841 
842   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
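  /// (e.g. TripCount = 17, VF = 4, UF = 2 gives 17 - 17 % 8 = 16).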
843   Value *VectorTripCount = nullptr;
844 
845   /// The legality analysis.
846   LoopVectorizationLegality *Legal;
847 
848   /// The profitability analysis.
849   LoopVectorizationCostModel *Cost;
850 
851   // Record whether runtime checks are added.
852   bool AddedSafetyChecks = false;
853 
854   // Holds the end values for each induction variable. We save the end values
855   // so we can later fix up the external users of the induction variables.
856   DenseMap<PHINode *, Value *> IVEndValues;
857 
858   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
859   // fixed up at the end of vector code generation.
860   SmallVector<PHINode *, 8> OrigPHIsToFix;
861 
862   /// BFI and PSI are used to check for profile guided size optimizations.
863   BlockFrequencyInfo *BFI;
864   ProfileSummaryInfo *PSI;
865 
866   // Whether this loop should be optimized for size based on profile-guided
867   // size optimizations.
868   bool OptForSizeBasedOnProfile;
869 };
870 
871 class InnerLoopUnroller : public InnerLoopVectorizer {
872 public:
873   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
874                     LoopInfo *LI, DominatorTree *DT,
875                     const TargetLibraryInfo *TLI,
876                     const TargetTransformInfo *TTI, AssumptionCache *AC,
877                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
878                     LoopVectorizationLegality *LVL,
879                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
880                     ProfileSummaryInfo *PSI)
881       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
882                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
883                             BFI, PSI) {}
884 
885 private:
886   Value *getBroadcastInstrs(Value *V) override;
887   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
888                        Instruction::BinaryOps Opcode =
889                        Instruction::BinaryOpsEnd) override;
890   Value *reverseVector(Value *Vec) override;
891 };
892 
893 /// Encapsulate information regarding vectorization of a loop and its epilogue.
894 /// This information is meant to be updated and used across two stages of
895 /// epilogue vectorization.
896 struct EpilogueLoopVectorizationInfo {
897   ElementCount MainLoopVF = ElementCount::getFixed(0);
898   unsigned MainLoopUF = 0;
899   ElementCount EpilogueVF = ElementCount::getFixed(0);
900   unsigned EpilogueUF = 0;
901   BasicBlock *MainLoopIterationCountCheck = nullptr;
902   BasicBlock *EpilogueIterationCountCheck = nullptr;
903   BasicBlock *SCEVSafetyCheck = nullptr;
904   BasicBlock *MemSafetyCheck = nullptr;
905   Value *TripCount = nullptr;
906   Value *VectorTripCount = nullptr;
907 
908   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
909                                 unsigned EUF)
910       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
911         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
912     assert(EUF == 1 &&
913            "A high UF for the epilogue loop is likely not beneficial.");
914   }
915 };
916 
917 /// An extension of the inner loop vectorizer that creates a skeleton for a
918 /// vectorized loop that has its epilogue (residual) also vectorized.
919 /// The idea is to run the vplan on a given loop twice: first to set up the
920 /// skeleton and vectorize the main loop, and second to complete the skeleton
921 /// from the first step and vectorize the epilogue.  This is achieved by
922 /// deriving two concrete strategy classes from this base class and invoking
923 /// them in succession from the loop vectorizer planner.
924 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
925 public:
926   InnerLoopAndEpilogueVectorizer(
927       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
928       DominatorTree *DT, const TargetLibraryInfo *TLI,
929       const TargetTransformInfo *TTI, AssumptionCache *AC,
930       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
931       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
932       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
933       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
934                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
935         EPI(EPI) {}
936 
937   // Override this function to handle the more complex control flow around the
938   // three loops.
939   BasicBlock *createVectorizedLoopSkeleton() final override {
940     return createEpilogueVectorizedLoopSkeleton();
941   }
942 
943   /// The interface for creating a vectorized skeleton using one of two
944   /// different strategies, each corresponding to one execution of the vplan
945   /// as described above.
946   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
947 
948   /// Holds and updates state information required to vectorize the main loop
949   /// and its epilogue in two separate passes. This setup helps us avoid
950   /// regenerating and recomputing runtime safety checks. It also helps us to
951   /// shorten the iteration-count-check path length for the cases where the
952   /// iteration count of the loop is so small that the main vector loop is
953   /// completely skipped.
954   EpilogueLoopVectorizationInfo &EPI;
955 };
956 
957 /// A specialized derived class of inner loop vectorizer that performs
958 /// vectorization of *main* loops in the process of vectorizing loops and their
959 /// epilogues.
960 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
961 public:
962   EpilogueVectorizerMainLoop(
963       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
964       DominatorTree *DT, const TargetLibraryInfo *TLI,
965       const TargetTransformInfo *TTI, AssumptionCache *AC,
966       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
967       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
968       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
969       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
970                                        EPI, LVL, CM, BFI, PSI) {}
971   /// Implements the interface for creating a vectorized skeleton using the
972   /// *main loop* strategy (i.e. the first pass of vplan execution).
973   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
974 
975 protected:
976   /// Emits an iteration count bypass check once for the main loop (when \p
977   /// ForEpilogue is false) and once for the epilogue loop (when \p
978   /// ForEpilogue is true).
979   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
980                                              bool ForEpilogue);
981   void printDebugTracesAtStart() override;
982   void printDebugTracesAtEnd() override;
983 };
984 
985 /// A specialized derived class of inner loop vectorizer that performs
986 /// vectorization of *epilogue* loops in the process of vectorizing loops and
987 /// their epilogues.
988 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
989 public:
990   EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
991                     LoopInfo *LI, DominatorTree *DT,
992                     const TargetLibraryInfo *TLI,
993                     const TargetTransformInfo *TTI, AssumptionCache *AC,
994                     OptimizationRemarkEmitter *ORE,
995                     EpilogueLoopVectorizationInfo &EPI,
996                     LoopVectorizationLegality *LVL,
997                     llvm::LoopVectorizationCostModel *CM,
998                     BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
999       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1000                                        EPI, LVL, CM, BFI, PSI) {}
1001   /// Implements the interface for creating a vectorized skeleton using the
1002   /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
1003   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1004 
1005 protected:
1006   /// Emits an iteration count bypass check after the main vector loop has
1007   /// finished to see if there are any iterations left to execute by either
1008   /// the vector epilogue or the scalar epilogue.
1009   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1010                                                       BasicBlock *Bypass,
1011                                                       BasicBlock *Insert);
1012   void printDebugTracesAtStart() override;
1013   void printDebugTracesAtEnd() override;
1014 };
1015 } // end namespace llvm
1016 
1017 /// Look for a meaningful debug location on the instruction or its
1018 /// operands.
1019 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1020   if (!I)
1021     return I;
1022 
1023   DebugLoc Empty;
1024   if (I->getDebugLoc() != Empty)
1025     return I;
1026 
1027   for (Use &Op : I->operands()) {
1028     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1029       if (OpInst->getDebugLoc() != Empty)
1030         return OpInst;
1031   }
1032 
1033   return I;
1034 }
1035 
1036 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1037   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1038     const DILocation *DIL = Inst->getDebugLoc();
1039     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1040         !isa<DbgInfoIntrinsic>(Inst)) {
1041       assert(!VF.isScalable() && "scalable vectors not yet supported.");
1042       auto NewDIL =
1043           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1044       if (NewDIL)
1045         B.SetCurrentDebugLocation(NewDIL.getValue());
1046       else
1047         LLVM_DEBUG(dbgs()
1048                    << "Failed to create new discriminator: "
1049                    << DIL->getFilename() << " Line: " << DIL->getLine());
1050     }
1051     else
1052       B.SetCurrentDebugLocation(DIL);
1053   } else
1054     B.SetCurrentDebugLocation(DebugLoc());
1055 }
1056 
1057 /// Write a record \p DebugMsg about vectorization failure to the debug
1058 /// output stream. If \p I is passed, it is an instruction that prevents
1059 /// vectorization.
1060 #ifndef NDEBUG
1061 static void debugVectorizationFailure(const StringRef DebugMsg,
1062     Instruction *I) {
1063   dbgs() << "LV: Not vectorizing: " << DebugMsg;
1064   if (I != nullptr)
1065     dbgs() << " " << *I;
1066   else
1067     dbgs() << '.';
1068   dbgs() << '\n';
1069 }
1070 #endif
1071 
1072 /// Create an analysis remark that explains why vectorization failed
1073 ///
1074 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1075 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1076 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1077 /// the location of the remark.  \return the remark object that can be
1078 /// streamed to.
1079 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1080     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1081   Value *CodeRegion = TheLoop->getHeader();
1082   DebugLoc DL = TheLoop->getStartLoc();
1083 
1084   if (I) {
1085     CodeRegion = I->getParent();
1086     // If there is no debug location attached to the instruction, revert to
1087     // using the loop's.
1088     if (I->getDebugLoc())
1089       DL = I->getDebugLoc();
1090   }
1091 
1092   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
1093   R << "loop not vectorized: ";
1094   return R;
1095 }
1096 
1097 /// Return a value for Step multiplied by VF.
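/// For example (following the code below), a fixed VF of 8 with Step = 2 yields
/// the constant 16, while a scalable VF of <vscale x 8> yields vscale * 16.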
1098 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1099   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1100   Constant *StepVal = ConstantInt::get(
1101       Step->getType(),
1102       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1103   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1104 }
1105 
1106 namespace llvm {
1107 
1108 void reportVectorizationFailure(const StringRef DebugMsg,
1109     const StringRef OREMsg, const StringRef ORETag,
1110     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1111   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1112   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1113   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1114                 ORETag, TheLoop, I) << OREMsg);
1115 }
1116 
1117 } // end namespace llvm
1118 
1119 #ifndef NDEBUG
1120 /// \return string containing a file name and a line # for the given loop.
1121 static std::string getDebugLocString(const Loop *L) {
1122   std::string Result;
1123   if (L) {
1124     raw_string_ostream OS(Result);
1125     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1126       LoopDbgLoc.print(OS);
1127     else
1128       // Just print the module name.
1129       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1130     OS.flush();
1131   }
1132   return Result;
1133 }
1134 #endif
1135 
1136 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1137                                          const Instruction *Orig) {
1138   // If the loop was versioned with memchecks, add the corresponding no-alias
1139   // metadata.
1140   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1141     LVer->annotateInstWithNoAlias(To, Orig);
1142 }
1143 
1144 void InnerLoopVectorizer::addMetadata(Instruction *To,
1145                                       Instruction *From) {
1146   propagateMetadata(To, From);
1147   addNewMetadata(To, From);
1148 }
1149 
1150 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1151                                       Instruction *From) {
1152   for (Value *V : To) {
1153     if (Instruction *I = dyn_cast<Instruction>(V))
1154       addMetadata(I, From);
1155   }
1156 }
1157 
1158 namespace llvm {
1159 
1160 // Loop vectorization cost-model hints describing how the scalar epilogue loop
1161 // should be lowered.
1162 enum ScalarEpilogueLowering {
1163 
1164   // The default: allowing scalar epilogues.
1165   CM_ScalarEpilogueAllowed,
1166 
1167   // Vectorization with OptForSize: don't allow epilogues.
1168   CM_ScalarEpilogueNotAllowedOptSize,
1169 
1170   // A special case of vectorization with OptForSize: loops with a very small
1171   // trip count are considered for vectorization under OptForSize, thereby
1172   // making sure the cost of their loop body is dominant, free of runtime
1173   // guards and scalar iteration overheads.
1174   CM_ScalarEpilogueNotAllowedLowTripLoop,
1175 
1176   // Loop hint predicate indicating an epilogue is undesired.
1177   CM_ScalarEpilogueNotNeededUsePredicate,
1178 
1179   // Directive indicating we must either tail-fold or not vectorize.
1180   CM_ScalarEpilogueNotAllowedUsePredicate
1181 };
1182 
1183 /// LoopVectorizationCostModel - estimates the expected speedups due to
1184 /// vectorization.
1185 /// In many cases vectorization is not profitable. This can happen because of
1186 /// a number of reasons. In this class we mainly attempt to predict the
1187 /// expected speedup/slowdowns due to the supported instruction set. We use the
1188 /// TargetTransformInfo to query the different backends for the cost of
1189 /// different operations.
1190 class LoopVectorizationCostModel {
1191 public:
1192   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1193                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1194                              LoopVectorizationLegality *Legal,
1195                              const TargetTransformInfo &TTI,
1196                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1197                              AssumptionCache *AC,
1198                              OptimizationRemarkEmitter *ORE, const Function *F,
1199                              const LoopVectorizeHints *Hints,
1200                              InterleavedAccessInfo &IAI)
1201       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1202         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1203         Hints(Hints), InterleaveInfo(IAI) {}
1204 
1205   /// \return An upper bound for the vectorization factor, or None if
1206   /// vectorization and interleaving should be avoided up front.
1207   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1208 
1209   /// \return True if runtime checks are required for vectorization, and false
1210   /// otherwise.
1211   bool runtimeChecksRequired();
1212 
1213   /// \return The most profitable vectorization factor and the cost of that VF.
1214   /// This method checks every power of two up to MaxVF. If UserVF is not zero
1215   /// then this vectorization factor will be selected if vectorization is
1216   /// possible.
1217   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1218   VectorizationFactor
1219   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1220                                     const LoopVectorizationPlanner &LVP);
1221 
1222   /// Setup cost-based decisions for user vectorization factor.
1223   void selectUserVectorizationFactor(ElementCount UserVF) {
1224     collectUniformsAndScalars(UserVF);
1225     collectInstsToScalarize(UserVF);
1226   }
1227 
1228   /// \return The size (in bits) of the smallest and widest types in the code
1229   /// that needs to be vectorized. We ignore values that remain scalar such as
1230   /// 64 bit loop indices.
1231   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1232 
1233   /// \return The desired interleave count.
1234   /// If interleave count has been specified by metadata it will be returned.
1235   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1236   /// are the selected vectorization factor and the cost of the selected VF.
1237   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1238 
1239   /// A memory access instruction may be vectorized in more than one way.
1240   /// The form of the instruction after vectorization depends on its cost.
1241   /// This function takes cost-based decisions for Load/Store instructions
1242   /// and collects them in a map. This decision map is used for building
1243   /// the lists of loop-uniform and loop-scalar instructions.
1244   /// The calculated cost is saved with the widening decision in order to
1245   /// avoid redundant calculations.
1246   void setCostBasedWideningDecision(ElementCount VF);
1247 
1248   /// A struct that represents some properties of the register usage
1249   /// of a loop.
1250   struct RegisterUsage {
1251     /// Holds the number of loop invariant values that are used in the loop.
1252     /// The key is ClassID of target-provided register class.
1253     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1254     /// Holds the maximum number of concurrent live intervals in the loop.
1255     /// The key is ClassID of target-provided register class.
1256     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1257   };
1258 
1259   /// \return Returns information about the register usages of the loop for the
1260   /// given vectorization factors.
1261   SmallVector<RegisterUsage, 8>
1262   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1263 
1264   /// Collect values we want to ignore in the cost model.
1265   void collectValuesToIgnore();
1266 
1267   /// Split reductions into those that happen in the loop, and those that happen
1268   /// outside. In-loop reductions are collected into InLoopReductionChains.
1269   void collectInLoopReductions();
1270 
1271   /// \returns The smallest bitwidth each instruction can be represented with.
1272   /// The vector equivalents of these instructions should be truncated to this
1273   /// type.
1274   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1275     return MinBWs;
1276   }
1277 
1278   /// \returns True if it is more profitable to scalarize instruction \p I for
1279   /// vectorization factor \p VF.
1280   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1281     assert(VF.isVector() &&
1282            "Profitable to scalarize relevant only for VF > 1.");
1283 
1284     // Cost model is not run in the VPlan-native path - return conservative
1285     // result until this changes.
1286     if (EnableVPlanNativePath)
1287       return false;
1288 
1289     auto Scalars = InstsToScalarize.find(VF);
1290     assert(Scalars != InstsToScalarize.end() &&
1291            "VF not yet analyzed for scalarization profitability");
1292     return Scalars->second.find(I) != Scalars->second.end();
1293   }
1294 
1295   /// Returns true if \p I is known to be uniform after vectorization.
1296   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1297     if (VF.isScalar())
1298       return true;
1299 
1300     // Cost model is not run in the VPlan-native path - return conservative
1301     // result until this changes.
1302     if (EnableVPlanNativePath)
1303       return false;
1304 
1305     auto UniformsPerVF = Uniforms.find(VF);
1306     assert(UniformsPerVF != Uniforms.end() &&
1307            "VF not yet analyzed for uniformity");
1308     return UniformsPerVF->second.count(I);
1309   }
1310 
1311   /// Returns true if \p I is known to be scalar after vectorization.
1312   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1313     if (VF.isScalar())
1314       return true;
1315 
1316     // Cost model is not run in the VPlan-native path - return conservative
1317     // result until this changes.
1318     if (EnableVPlanNativePath)
1319       return false;
1320 
1321     auto ScalarsPerVF = Scalars.find(VF);
1322     assert(ScalarsPerVF != Scalars.end() &&
1323            "Scalar values are not calculated for VF");
1324     return ScalarsPerVF->second.count(I);
1325   }
1326 
1327   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1328   /// for vectorization factor \p VF.
1329   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1330     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1331            !isProfitableToScalarize(I, VF) &&
1332            !isScalarAfterVectorization(I, VF);
1333   }
1334 
1335   /// Decision that was taken during cost calculation for memory instruction.
1336   enum InstWidening {
1337     CM_Unknown,
1338     CM_Widen,         // For consecutive accesses with stride +1.
1339     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1340     CM_Interleave,
1341     CM_GatherScatter,
1342     CM_Scalarize
1343   };
1344 
1345   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1346   /// instruction \p I and vector width \p VF.
1347   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1348                            InstructionCost Cost) {
1349     assert(VF.isVector() && "Expected VF >=2");
1350     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1351   }
1352 
1353   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1354   /// interleaving group \p Grp and vector width \p VF.
1355   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1356                            ElementCount VF, InstWidening W,
1357                            InstructionCost Cost) {
1358     assert(VF.isVector() && "Expected VF >=2");
1359     // Broadcast this decision to all instructions inside the group.
1360     // But the cost will be assigned to one instruction only.
1361     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1362       if (auto *I = Grp->getMember(i)) {
1363         if (Grp->getInsertPos() == I)
1364           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1365         else
1366           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1367       }
1368     }
1369   }
1370 
1371   /// Return the cost model decision for the given instruction \p I and vector
1372   /// width \p VF. Return CM_Unknown if this instruction did not pass
1373   /// through the cost modeling.
1374   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1375     assert(VF.isVector() && "Expected VF to be a vector VF");
1376     // Cost model is not run in the VPlan-native path - return conservative
1377     // result until this changes.
1378     if (EnableVPlanNativePath)
1379       return CM_GatherScatter;
1380 
1381     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1382     auto Itr = WideningDecisions.find(InstOnVF);
1383     if (Itr == WideningDecisions.end())
1384       return CM_Unknown;
1385     return Itr->second.first;
1386   }
1387 
1388   /// Return the vectorization cost for the given instruction \p I and vector
1389   /// width \p VF.
1390   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1391     assert(VF.isVector() && "Expected VF >=2");
1392     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1393     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1394            "The cost is not calculated");
1395     return WideningDecisions[InstOnVF].second;
1396   }
1397 
1398   /// Return True if instruction \p I is an optimizable truncate whose operand
1399   /// is an induction variable. Such a truncate will be removed by adding a new
1400   /// induction variable with the destination type.
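  /// For example (illustrative): a 'trunc i64 %iv to i32' of a widened 64-bit
  /// induction can be modelled as a new 32-bit induction with the same start
  /// and step, making the truncate redundant.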
1401   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1402     // If the instruction is not a truncate, return false.
1403     auto *Trunc = dyn_cast<TruncInst>(I);
1404     if (!Trunc)
1405       return false;
1406 
1407     // Get the source and destination types of the truncate.
1408     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1409     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1410 
1411     // If the truncate is free for the given types, return false. Replacing a
1412     // free truncate with an induction variable would add an induction variable
1413     // update instruction to each iteration of the loop. We exclude from this
1414     // check the primary induction variable since it will need an update
1415     // instruction regardless.
1416     Value *Op = Trunc->getOperand(0);
1417     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1418       return false;
1419 
1420     // If the truncated value is not an induction variable, return false.
1421     return Legal->isInductionPhi(Op);
1422   }
1423 
1424   /// Collects the instructions to scalarize for each predicated instruction in
1425   /// the loop.
1426   void collectInstsToScalarize(ElementCount VF);
1427 
1428   /// Collect Uniform and Scalar values for the given \p VF.
1429   /// The sets depend on CM decision for Load/Store instructions
1430   /// that may be vectorized as interleave, gather-scatter or scalarized.
1431   void collectUniformsAndScalars(ElementCount VF) {
1432     // Do the analysis once.
1433     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1434       return;
1435     setCostBasedWideningDecision(VF);
1436     collectLoopUniforms(VF);
1437     collectLoopScalars(VF);
1438   }
1439 
1440   /// Returns true if the target machine supports masked store operation
1441   /// for the given \p DataType and kind of access to \p Ptr.
1442   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1443     return Legal->isConsecutivePtr(Ptr) &&
1444            TTI.isLegalMaskedStore(DataType, Alignment);
1445   }
1446 
1447   /// Returns true if the target machine supports masked load operation
1448   /// for the given \p DataType and kind of access to \p Ptr.
1449   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1450     return Legal->isConsecutivePtr(Ptr) &&
1451            TTI.isLegalMaskedLoad(DataType, Alignment);
1452   }
1453 
1454   /// Returns true if the target machine supports masked scatter operation
1455   /// for the given \p DataType.
1456   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1457     return TTI.isLegalMaskedScatter(DataType, Alignment);
1458   }
1459 
1460   /// Returns true if the target machine supports masked gather operation
1461   /// for the given \p DataType.
1462   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1463     return TTI.isLegalMaskedGather(DataType, Alignment);
1464   }
1465 
1466   /// Returns true if the target machine can represent \p V as a masked gather
1467   /// or scatter operation.
1468   bool isLegalGatherOrScatter(Value *V) {
1469     bool LI = isa<LoadInst>(V);
1470     bool SI = isa<StoreInst>(V);
1471     if (!LI && !SI)
1472       return false;
1473     auto *Ty = getMemInstValueType(V);
1474     Align Align = getLoadStoreAlignment(V);
1475     return (LI && isLegalMaskedGather(Ty, Align)) ||
1476            (SI && isLegalMaskedScatter(Ty, Align));
1477   }
1478 
1479   /// Returns true if the target machine supports all of the reduction
1480   /// variables found for the given VF.
1481   bool canVectorizeReductions(ElementCount VF) {
1482     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1483       RecurrenceDescriptor RdxDesc = Reduction.second;
1484       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1485     }));
1486   }
1487 
1488   /// Returns true if \p I is an instruction that will be scalarized with
1489   /// predication. Such instructions include conditional stores and
1490   /// instructions that may divide by zero.
1491   /// If a non-zero VF has been calculated, we check if I will be scalarized
1492   /// with predication for that VF.
1493   bool isScalarWithPredication(Instruction *I,
1494                                ElementCount VF = ElementCount::getFixed(1));
1495 
1496   /// Returns true if \p I is an instruction that will be predicated either
1497   /// through scalar predication or masked load/store or masked gather/scatter.
1498   /// Superset of instructions that return true for isScalarWithPredication.
1499   bool isPredicatedInst(Instruction *I) {
1500     if (!blockNeedsPredication(I->getParent()))
1501       return false;
1502     // Loads and stores that need some form of masked operation are predicated
1503     // instructions.
1504     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1505       return Legal->isMaskRequired(I);
1506     return isScalarWithPredication(I);
1507   }
1508 
1509   /// Returns true if \p I is a memory instruction with consecutive memory
1510   /// access that can be widened.
1511   bool
1512   memoryInstructionCanBeWidened(Instruction *I,
1513                                 ElementCount VF = ElementCount::getFixed(1));
1514 
1515   /// Returns true if \p I is a memory instruction in an interleaved-group
1516   /// of memory accesses that can be vectorized with wide vector loads/stores
1517   /// and shuffles.
1518   bool
1519   interleavedAccessCanBeWidened(Instruction *I,
1520                                 ElementCount VF = ElementCount::getFixed(1));
1521 
1522   /// Check if \p Instr belongs to any interleaved access group.
1523   bool isAccessInterleaved(Instruction *Instr) {
1524     return InterleaveInfo.isInterleaved(Instr);
1525   }
1526 
1527   /// Get the interleaved access group that \p Instr belongs to.
1528   const InterleaveGroup<Instruction> *
1529   getInterleavedAccessGroup(Instruction *Instr) {
1530     return InterleaveInfo.getInterleaveGroup(Instr);
1531   }
1532 
1533   /// Returns true if we're required to use a scalar epilogue for at least
1534   /// the final iteration of the original loop.
1535   bool requiresScalarEpilogue() const {
1536     if (!isScalarEpilogueAllowed())
1537       return false;
1538     // If we might exit from anywhere but the latch, we must run the exiting
1539     // iteration in scalar form.
1540     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1541       return true;
1542     return InterleaveInfo.requiresScalarEpilogue();
1543   }
1544 
1545   /// Returns true if a scalar epilogue is allowed, i.e. it has not been
1546   /// disallowed by optsize or a loop hint annotation.
1547   bool isScalarEpilogueAllowed() const {
1548     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1549   }
1550 
1551   /// Returns true if all loop blocks should be masked to fold tail loop.
1552   bool foldTailByMasking() const { return FoldTailByMasking; }
1553 
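  /// Returns true if the instructions in \p BB require predication, either
  /// because the tail is folded by masking or because the block is not
  /// guaranteed to execute on every iteration of the original loop.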
1554   bool blockNeedsPredication(BasicBlock *BB) {
1555     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1556   }
1557 
1558   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1559   /// nodes to the chain of instructions representing the reductions. Uses a
1560   /// MapVector to ensure deterministic iteration order.
1561   using ReductionChainMap =
1562       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1563 
1564   /// Return the chain of instructions representing an inloop reduction.
1565   const ReductionChainMap &getInLoopReductionChains() const {
1566     return InLoopReductionChains;
1567   }
1568 
1569   /// Returns true if the Phi is part of an inloop reduction.
1570   bool isInLoopReduction(PHINode *Phi) const {
1571     return InLoopReductionChains.count(Phi);
1572   }
1573 
1574   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1575   /// with factor VF.  Return the cost of the instruction, including
1576   /// scalarization overhead if it's needed.
1577   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1578 
1579   /// Estimate cost of a call instruction CI if it were vectorized with factor
1580   /// VF. Return the cost of the instruction, including scalarization overhead
1581   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1582   /// scalarized - i.e. either a vector version isn't available, or it is too
1583   /// expensive.
1584   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1585                                     bool &NeedToScalarize);
1586 
1587   /// Invalidates decisions already taken by the cost model.
1588   void invalidateCostModelingDecisions() {
1589     WideningDecisions.clear();
1590     Uniforms.clear();
1591     Scalars.clear();
1592   }
1593 
1594 private:
1595   unsigned NumPredStores = 0;
1596 
1597   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1598   /// than zero. One is returned if vectorization should best be avoided due
1599   /// to cost.
1600   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1601                                     ElementCount UserVF);
1602 
1603   /// The vectorization cost is a combination of the cost itself and a boolean
1604   /// indicating whether any of the contributing operations will actually
1605   /// operate on vector values after type legalization in the backend. If this
1606   /// latter value is false, then all operations will be scalarized (i.e. no
1607   /// vectorization has actually taken place).
1610   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1611 
1612   /// Returns the expected execution cost. The unit of the cost does
1613   /// not matter because we use the 'cost' units to compare different
1614   /// vector widths. The cost that is returned is *not* normalized by
1615   /// the factor width.
1616   VectorizationCostTy expectedCost(ElementCount VF);
1617 
1618   /// Returns the execution time cost of an instruction for a given vector
1619   /// width. Vector width of one means scalar.
1620   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1621 
1622   /// The cost-computation logic from getInstructionCost which provides
1623   /// the vector type as an output parameter.
1624   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1625                                      Type *&VectorTy);
1626 
1627   /// Return the cost of instructions in an inloop reduction pattern, if I is
1628   /// part of that pattern.
1629   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1630                                           Type *VectorTy,
1631                                           TTI::TargetCostKind CostKind);
1632 
1633   /// Calculate vectorization cost of memory instruction \p I.
1634   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1635 
1636   /// The cost computation for scalarized memory instruction.
1637   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1638 
1639   /// The cost computation for interleaving group of memory instructions.
1640   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1641 
1642   /// The cost computation for Gather/Scatter instruction.
1643   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1644 
1645   /// The cost computation for widening instruction \p I with consecutive
1646   /// memory access.
1647   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1648 
1649   /// The cost calculation for Load/Store instruction \p I with a uniform
1650   /// pointer - Load: scalar load + broadcast.
1651   /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
1652   /// element).
1653   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1654 
1655   /// Estimate the overhead of scalarizing an instruction. This is a
1656   /// convenience wrapper for the type-based getScalarizationOverhead API.
1657   InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);
1658 
1659   /// Returns whether the instruction is a load or store and will be emitted
1660   /// as a vector operation.
1661   bool isConsecutiveLoadOrStore(Instruction *I);
1662 
1663   /// Returns true if an artificially high cost for emulated masked memrefs
1664   /// should be used.
1665   bool useEmulatedMaskMemRefHack(Instruction *I);
1666 
1667   /// Map of scalar integer values to the smallest bitwidth they can be legally
1668   /// represented as. The vector equivalents of these values should be truncated
1669   /// to this type.
1670   MapVector<Instruction *, uint64_t> MinBWs;
1671 
1672   /// A type representing the costs for instructions if they were to be
1673   /// scalarized rather than vectorized. The entries are Instruction-Cost
1674   /// pairs.
1675   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1676 
1677   /// A set containing all BasicBlocks that are known to be present after
1678   /// vectorization as predicated blocks.
1679   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1680 
1681   /// Records whether it is allowed to have the original scalar loop execute at
1682   /// least once. This may be needed as a fallback loop in case runtime
1683   /// aliasing/dependence checks fail, or to handle the tail/remainder
1684   /// iterations when the trip count is unknown or doesn't divide by the VF,
1685   /// or as a peel-loop to handle gaps in interleave-groups.
1686   /// Under optsize and when the trip count is very small we don't allow any
1687   /// iterations to execute in the scalar loop.
1688   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1689 
1690   /// All loop blocks are to be masked to fold the tail of scalar iterations.
1691   bool FoldTailByMasking = false;
1692 
1693   /// A map holding scalar costs for different vectorization factors. The
1694   /// presence of a cost for an instruction in the mapping indicates that the
1695   /// instruction will be scalarized when vectorizing with the associated
1696   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1697   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1698 
1699   /// Holds the instructions known to be uniform after vectorization.
1700   /// The data is collected per VF.
1701   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1702 
1703   /// Holds the instructions known to be scalar after vectorization.
1704   /// The data is collected per VF.
1705   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1706 
1707   /// Holds the instructions (address computations) that are forced to be
1708   /// scalarized.
1709   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1710 
1711   /// PHINodes of the reductions that should be expanded in-loop along with
1712   /// their associated chains of reduction operations, in program order from top
1713   /// (PHI) to bottom.
1714   ReductionChainMap InLoopReductionChains;
1715 
1716   /// A Map of inloop reduction operations and their immediate chain operand.
1717   /// FIXME: This can be removed once reductions can be costed correctly in
1718   /// vplan. This was added to allow quick lookup to the inloop operations,
1719   /// without having to loop through InLoopReductionChains.
1720   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1721 
1722   /// Returns the expected difference in cost from scalarizing the expression
1723   /// feeding a predicated instruction \p PredInst. The instructions to
1724   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1725   /// non-negative return value implies the expression will be scalarized.
1726   /// Currently, only single-use chains are considered for scalarization.
1727   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1728                               ElementCount VF);
1729 
1730   /// Collect the instructions that are uniform after vectorization. An
1731   /// instruction is uniform if we represent it with a single scalar value in
1732   /// the vectorized loop corresponding to each vector iteration. Examples of
1733   /// uniform instructions include pointer operands of consecutive or
1734   /// interleaved memory accesses. Note that although uniformity implies an
1735   /// instruction will be scalar, the reverse is not true. In general, a
1736   /// scalarized instruction will be represented by VF scalar values in the
1737   /// vectorized loop, each corresponding to an iteration of the original
1738   /// scalar loop.
1739   void collectLoopUniforms(ElementCount VF);
1740 
1741   /// Collect the instructions that are scalar after vectorization. An
1742   /// instruction is scalar if it is known to be uniform or will be scalarized
1743   /// during vectorization. Non-uniform scalarized instructions will be
1744   /// represented by VF values in the vectorized loop, each corresponding to an
1745   /// iteration of the original scalar loop.
1746   void collectLoopScalars(ElementCount VF);
1747 
1748   /// Keeps cost model vectorization decision and cost for instructions.
1749   /// Right now it is used for memory instructions only.
1750   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1751                                 std::pair<InstWidening, InstructionCost>>;
1752 
1753   DecisionList WideningDecisions;
1754 
1755   /// Returns true if \p V is expected to be vectorized and it needs to be
1756   /// extracted.
1757   bool needsExtract(Value *V, ElementCount VF) const {
1758     Instruction *I = dyn_cast<Instruction>(V);
1759     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1760         TheLoop->isLoopInvariant(I))
1761       return false;
1762 
1763     // Assume we can vectorize V (and hence we need extraction) if the
1764     // scalars are not computed yet. This can happen, because it is called
1765     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1766     // the scalars are collected. That should be a safe assumption in most
1767     // cases, because we check if the operands have vectorizable types
1768     // beforehand in LoopVectorizationLegality.
1769     return Scalars.find(VF) == Scalars.end() ||
1770            !isScalarAfterVectorization(I, VF);
1771   };
1772 
1773   /// Returns a range containing only operands needing to be extracted.
1774   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1775                                                    ElementCount VF) {
1776     return SmallVector<Value *, 4>(make_filter_range(
1777         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1778   }
1779 
1780   /// Determines if we have the infrastructure to vectorize loop \p L and its
1781   /// epilogue, assuming the main loop is vectorized by \p VF.
1782   bool isCandidateForEpilogueVectorization(const Loop &L,
1783                                            const ElementCount VF) const;
1784 
1785   /// Returns true if epilogue vectorization is considered profitable, and
1786   /// false otherwise.
1787   /// \p VF is the vectorization factor chosen for the original loop.
1788   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1789 
1790 public:
1791   /// The loop that we evaluate.
1792   Loop *TheLoop;
1793 
1794   /// Predicated scalar evolution analysis.
1795   PredicatedScalarEvolution &PSE;
1796 
1797   /// Loop Info analysis.
1798   LoopInfo *LI;
1799 
1800   /// Vectorization legality.
1801   LoopVectorizationLegality *Legal;
1802 
1803   /// Vector target information.
1804   const TargetTransformInfo &TTI;
1805 
1806   /// Target Library Info.
1807   const TargetLibraryInfo *TLI;
1808 
1809   /// Demanded bits analysis.
1810   DemandedBits *DB;
1811 
1812   /// Assumption cache.
1813   AssumptionCache *AC;
1814 
1815   /// Interface to emit optimization remarks.
1816   OptimizationRemarkEmitter *ORE;
1817 
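  /// The function containing the loop being vectorized.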
1818   const Function *TheFunction;
1819 
1820   /// Loop Vectorize Hint.
1821   const LoopVectorizeHints *Hints;
1822 
1823   /// The interleaved access information contains groups of interleaved accesses
1824   /// with the same stride that are located close to each other.
1825   InterleavedAccessInfo &InterleaveInfo;
1826 
1827   /// Values to ignore in the cost model.
1828   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1829 
1830   /// Values to ignore in the cost model when VF > 1.
1831   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1832 
1833   /// Profitable vector factors.
1834   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1835 };
1836 
1837 } // end namespace llvm
1838 
1839 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1840 // vectorization. The loop needs to be annotated with #pragma omp simd
1841 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
1842 // the vector length information is not provided, vectorization is not
1843 // considered explicit. Interleave hints are not allowed either. These
1844 // limitations will be relaxed in the future.
1845 // Please note that we are currently forced to abuse the pragma 'clang
1846 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1847 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1848 // provides *explicit vectorization hints* (LV can bypass legal checks and
1849 // assume that vectorization is legal). However, both hints are implemented
1850 // using the same metadata (llvm.loop.vectorize, processed by
1851 // LoopVectorizeHints). This will be fixed in the future when the native IR
1852 // representation for pragma 'omp simd' is introduced.
1853 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1854                                    OptimizationRemarkEmitter *ORE) {
1855   assert(!OuterLp->isInnermost() && "This is not an outer loop");
1856   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1857 
1858   // Only outer loops with an explicit vectorization hint are supported.
1859   // Unannotated outer loops are ignored.
1860   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1861     return false;
1862 
1863   Function *Fn = OuterLp->getHeader()->getParent();
1864   if (!Hints.allowVectorization(Fn, OuterLp,
1865                                 true /*VectorizeOnlyWhenForced*/)) {
1866     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1867     return false;
1868   }
1869 
1870   if (Hints.getInterleave() > 1) {
1871     // TODO: Interleave support is future work.
1872     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1873                          "outer loops.\n");
1874     Hints.emitRemarkWithHints();
1875     return false;
1876   }
1877 
1878   return true;
1879 }
1880 
1881 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1882                                   OptimizationRemarkEmitter *ORE,
1883                                   SmallVectorImpl<Loop *> &V) {
1884   // Collect inner loops and outer loops without irreducible control flow. For
1885   // now, only collect outer loops that have explicit vectorization hints. If we
1886   // are stress testing the VPlan H-CFG construction, we collect the outermost
1887   // loop of every loop nest.
1888   if (L.isInnermost() || VPlanBuildStressTest ||
1889       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1890     LoopBlocksRPO RPOT(&L);
1891     RPOT.perform(LI);
1892     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1893       V.push_back(&L);
1894       // TODO: Collect inner loops inside marked outer loops in case
1895       // vectorization fails for the outer loop. Do not invoke
1896       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1897       // already known to be reducible. We can use an inherited attribute for
1898       // that.
1899       return;
1900     }
1901   }
1902   for (Loop *InnerL : L)
1903     collectSupportedLoops(*InnerL, LI, ORE, V);
1904 }
1905 
1906 namespace {
1907 
1908 /// The LoopVectorize Pass.
1909 struct LoopVectorize : public FunctionPass {
1910   /// Pass identification, replacement for typeid
1911   static char ID;
1912 
1913   LoopVectorizePass Impl;
1914 
1915   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1916                          bool VectorizeOnlyWhenForced = false)
1917       : FunctionPass(ID),
1918         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1919     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1920   }
1921 
1922   bool runOnFunction(Function &F) override {
1923     if (skipFunction(F))
1924       return false;
1925 
1926     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1927     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1928     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1929     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1930     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1931     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1932     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1933     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1934     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1935     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1936     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1937     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1938     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1939 
1940     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1941         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1942 
1943     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1944                         GetLAA, *ORE, PSI).MadeAnyChange;
1945   }
1946 
1947   void getAnalysisUsage(AnalysisUsage &AU) const override {
1948     AU.addRequired<AssumptionCacheTracker>();
1949     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1950     AU.addRequired<DominatorTreeWrapperPass>();
1951     AU.addRequired<LoopInfoWrapperPass>();
1952     AU.addRequired<ScalarEvolutionWrapperPass>();
1953     AU.addRequired<TargetTransformInfoWrapperPass>();
1954     AU.addRequired<AAResultsWrapperPass>();
1955     AU.addRequired<LoopAccessLegacyAnalysis>();
1956     AU.addRequired<DemandedBitsWrapperPass>();
1957     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1958     AU.addRequired<InjectTLIMappingsLegacy>();
1959 
1960     // We currently do not preserve loopinfo/dominator analyses with outer loop
1961     // vectorization. Until this is addressed, mark these analyses as preserved
1962     // only for non-VPlan-native path.
1963     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1964     if (!EnableVPlanNativePath) {
1965       AU.addPreserved<LoopInfoWrapperPass>();
1966       AU.addPreserved<DominatorTreeWrapperPass>();
1967     }
1968 
1969     AU.addPreserved<BasicAAWrapperPass>();
1970     AU.addPreserved<GlobalsAAWrapperPass>();
1971     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1972   }
1973 };
1974 
1975 } // end anonymous namespace
1976 
1977 //===----------------------------------------------------------------------===//
1978 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1979 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1980 //===----------------------------------------------------------------------===//
1981 
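// Create a broadcast (splat) of the scalar V across all vector lanes.
// Illustrative example: with a fixed VF of 4 and a loop-invariant scalar %x,
// this emits a splat of %x into a 4-element vector, placed in the vector
// preheader when hoisting is safe and in the vector loop body otherwise.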
1982 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1983   // We need to place the broadcast of invariant variables outside the loop,
1984   // but only if it's proven safe to do so. Else, broadcast will be inside
1985   // vector loop body.
1986   Instruction *Instr = dyn_cast<Instruction>(V);
1987   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1988                      (!Instr ||
1989                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1990   // Place the code for broadcasting invariant variables in the new preheader.
1991   IRBuilder<>::InsertPointGuard Guard(Builder);
1992   if (SafeToHoist)
1993     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1994 
1995   // Broadcast the scalar into all locations in the vector.
1996   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1997 
1998   return Shuf;
1999 }
2000 
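// Illustrative example (fixed VF = 4, UF = 2, integer induction with start 0
// and step 1): the vector phi %vec.ind starts at <0, 1, 2, 3>; part 0 uses
// %vec.ind and part 1 uses %vec.ind + <4, 4, 4, 4>; the update moved to the
// latch adds another <4, 4, 4, 4> to produce the value fed back into the phi.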
2001 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2002     const InductionDescriptor &II, Value *Step, Value *Start,
2003     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2004     VPTransformState &State) {
2005   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2006          "Expected either an induction phi-node or a truncate of it!");
2007 
2008   // Construct the initial value of the vector IV in the vector loop preheader
2009   auto CurrIP = Builder.saveIP();
2010   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2011   if (isa<TruncInst>(EntryVal)) {
2012     assert(Start->getType()->isIntegerTy() &&
2013            "Truncation requires an integer type");
2014     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2015     Step = Builder.CreateTrunc(Step, TruncType);
2016     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2017   }
2018   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2019   Value *SteppedStart =
2020       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2021 
2022   // We create vector phi nodes for both integer and floating-point induction
2023   // variables. Here, we determine the kind of arithmetic we will perform.
2024   Instruction::BinaryOps AddOp;
2025   Instruction::BinaryOps MulOp;
2026   if (Step->getType()->isIntegerTy()) {
2027     AddOp = Instruction::Add;
2028     MulOp = Instruction::Mul;
2029   } else {
2030     AddOp = II.getInductionOpcode();
2031     MulOp = Instruction::FMul;
2032   }
2033 
2034   // Multiply the vectorization factor by the step using integer or
2035   // floating-point arithmetic as appropriate.
2036   Value *ConstVF =
2037       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
2038   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2039 
2040   // Create a vector splat to use in the induction update.
2041   //
2042   // FIXME: If the step is non-constant, we create the vector splat with
2043   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2044   //        handle a constant vector splat.
2045   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2046   Value *SplatVF = isa<Constant>(Mul)
2047                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2048                        : Builder.CreateVectorSplat(VF, Mul);
2049   Builder.restoreIP(CurrIP);
2050 
2051   // We may need to add the step a number of times, depending on the unroll
2052   // factor. The last of those goes into the PHI.
2053   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2054                                     &*LoopVectorBody->getFirstInsertionPt());
2055   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2056   Instruction *LastInduction = VecInd;
2057   for (unsigned Part = 0; Part < UF; ++Part) {
2058     State.set(Def, LastInduction, Part);
2059 
2060     if (isa<TruncInst>(EntryVal))
2061       addMetadata(LastInduction, EntryVal);
2062     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2063                                           State, Part);
2064 
2065     LastInduction = cast<Instruction>(addFastMathFlag(
2066         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2067     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2068   }
2069 
2070   // Move the last step to the end of the latch block. This ensures consistent
2071   // placement of all induction updates.
2072   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2073   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2074   auto *ICmp = cast<Instruction>(Br->getCondition());
2075   LastInduction->moveBefore(ICmp);
2076   LastInduction->setName("vec.ind.next");
2077 
2078   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2079   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2080 }
2081 
2082 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2083   return Cost->isScalarAfterVectorization(I, VF) ||
2084          Cost->isProfitableToScalarize(I, VF);
2085 }
2086 
2087 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2088   if (shouldScalarizeInstruction(IV))
2089     return true;
2090   auto isScalarInst = [&](User *U) -> bool {
2091     auto *I = cast<Instruction>(U);
2092     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2093   };
2094   return llvm::any_of(IV->users(), isScalarInst);
2095 }
2096 
2097 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2098     const InductionDescriptor &ID, const Instruction *EntryVal,
2099     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2100     unsigned Part, unsigned Lane) {
2101   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2102          "Expected either an induction phi-node or a truncate of it!");
2103 
2104   // This induction variable is not the phi from the original loop but a
2105   // newly-created IV, based on the proof that the casted Phi is equal to the
2106   // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
2107   // re-uses the same InductionDescriptor that the original IV uses, but we
2108   // don't have to do any recording in this case - that is done when the
2109   // original IV is processed.
2110   if (isa<TruncInst>(EntryVal))
2111     return;
2112 
2113   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2114   if (Casts.empty())
2115     return;
2116   // Only the first Cast instruction in the Casts vector is of interest.
2117   // The rest of the Casts (if they exist) have no uses outside the
2118   // induction update chain itself.
2119   if (Lane < UINT_MAX)
2120     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2121   else
2122     State.set(CastDef, VectorLoopVal, Part);
2123 }
2124 
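// Widen the integer or floating-point induction variable \p IV. In outline
// (the exact conditions are handled below): for a scalar VF the per-part
// values are derived directly from the scalar IV; if no user in the loop
// needs a scalar version, a dedicated vector IV phi is created; otherwise
// scalar steps are built as well, alongside either a vector IV phi or, when
// the tail is folded, a splat of the scalar IV.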
2125 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2126                                                 TruncInst *Trunc, VPValue *Def,
2127                                                 VPValue *CastDef,
2128                                                 VPTransformState &State) {
2129   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2130          "Primary induction variable must have an integer type");
2131 
2132   auto II = Legal->getInductionVars().find(IV);
2133   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2134 
2135   auto ID = II->second;
2136   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2137 
2138   // The value from the original loop to which we are mapping the new induction
2139   // variable.
2140   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2141 
2142   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2143 
2144   // Generate code for the induction step. Note that induction steps are
2145   // required to be loop-invariant.
2146   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2147     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2148            "Induction step should be loop invariant");
2149     if (PSE.getSE()->isSCEVable(IV->getType())) {
2150       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2151       return Exp.expandCodeFor(Step, Step->getType(),
2152                                LoopVectorPreHeader->getTerminator());
2153     }
2154     return cast<SCEVUnknown>(Step)->getValue();
2155   };
2156 
2157   // The scalar value to broadcast. This is derived from the canonical
2158   // induction variable. If a truncation type is given, truncate the canonical
2159   // induction variable and step. Otherwise, derive these values from the
2160   // induction descriptor.
2161   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2162     Value *ScalarIV = Induction;
2163     if (IV != OldInduction) {
2164       ScalarIV = IV->getType()->isIntegerTy()
2165                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2166                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2167                                           IV->getType());
2168       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2169       ScalarIV->setName("offset.idx");
2170     }
2171     if (Trunc) {
2172       auto *TruncType = cast<IntegerType>(Trunc->getType());
2173       assert(Step->getType()->isIntegerTy() &&
2174              "Truncation requires an integer step");
2175       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2176       Step = Builder.CreateTrunc(Step, TruncType);
2177     }
2178     return ScalarIV;
2179   };
2180 
2181   // Create the vector values from the scalar IV when we are not creating a
2182   // vector IV.
2183   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2184     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2185     for (unsigned Part = 0; Part < UF; ++Part) {
2186       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2187       Value *EntryPart =
2188           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2189                         ID.getInductionOpcode());
2190       State.set(Def, EntryPart, Part);
2191       if (Trunc)
2192         addMetadata(EntryPart, Trunc);
2193       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2194                                             State, Part);
2195     }
2196   };
2197 
2198   // Now do the actual transformations, and start with creating the step value.
2199   Value *Step = CreateStepValue(ID.getStep());
2200   if (VF.isZero() || VF.isScalar()) {
2201     Value *ScalarIV = CreateScalarIV(Step);
2202     CreateSplatIV(ScalarIV, Step);
2203     return;
2204   }
2205 
2206   // Determine if we want a scalar version of the induction variable. This is
2207   // true if the induction variable itself is not widened, or if it has at
2208   // least one user in the loop that is not widened.
2209   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2210   if (!NeedsScalarIV) {
2211     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2212                                     State);
2213     return;
2214   }
2215 
2216   // Try to create a new independent vector induction variable. If we can't
2217   // create the phi node, we will splat the scalar induction variable in each
2218   // loop iteration.
2219   if (!shouldScalarizeInstruction(EntryVal)) {
2220     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2221                                     State);
2222     Value *ScalarIV = CreateScalarIV(Step);
2223     // Create scalar steps that can be used by instructions we will later
2224     // scalarize. Note that the addition of the scalar steps will not increase
2225     // the number of instructions in the loop in the common case prior to
2226     // InstCombine. We will be trading one vector extract for each scalar step.
2227     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2228     return;
2229   }
2230 
2231   // All IV users are scalar instructions, so only emit a scalar IV, not a
2232   // vectorized IV. Except when we tail-fold, then the splat IV feeds the
2233   // predicate used by the masked loads/stores.
2234   Value *ScalarIV = CreateScalarIV(Step);
2235   if (!Cost->isScalarEpilogueAllowed())
2236     CreateSplatIV(ScalarIV, Step);
2237   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2238 }
2239 
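// Produce a vector whose lanes are \p Val plus consecutive multiples of the
// step. Illustrative example: for a 4-element Val, StartIdx = 0 and an
// integer step s, lane i of the result is Val[i] + i * s.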
2240 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2241                                           Instruction::BinaryOps BinOp) {
2242   // Create and check the types.
2243   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2244   int VLen = ValVTy->getNumElements();
2245 
2246   Type *STy = Val->getType()->getScalarType();
2247   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2248          "Induction Step must be an integer or FP");
2249   assert(Step->getType() == STy && "Step has wrong type");
2250 
2251   SmallVector<Constant *, 8> Indices;
2252 
2253   if (STy->isIntegerTy()) {
2254     // Create a vector of consecutive numbers from zero to VF.
2255     for (int i = 0; i < VLen; ++i)
2256       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2257 
2258     // Add the consecutive indices to the vector value.
2259     Constant *Cv = ConstantVector::get(Indices);
2260     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2261     Step = Builder.CreateVectorSplat(VLen, Step);
2262     assert(Step->getType() == Val->getType() && "Invalid step vec");
2263     // FIXME: The newly created binary instructions should contain nsw/nuw
2264     // flags, which can be found from the original scalar operations.
2265     Step = Builder.CreateMul(Cv, Step);
2266     return Builder.CreateAdd(Val, Step, "induction");
2267   }
2268 
2269   // Floating point induction.
2270   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2271          "Binary Opcode should be specified for FP induction");
2272   // Create a vector of consecutive numbers from zero to VF.
2273   for (int i = 0; i < VLen; ++i)
2274     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2275 
2276   // Add the consecutive indices to the vector value.
2277   Constant *Cv = ConstantVector::get(Indices);
2278 
2279   Step = Builder.CreateVectorSplat(VLen, Step);
2280 
2281   // Floating point operations had to be 'fast' to enable the induction.
2282   FastMathFlags Flags;
2283   Flags.setFast();
2284 
2285   Value *MulOp = Builder.CreateFMul(Cv, Step);
2286   if (isa<Instruction>(MulOp))
2287     // We have to check; MulOp may be a constant rather than an instruction.
2288     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2289 
2290   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2291   if (isa<Instruction>(BOp))
2292     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2293   return BOp;
2294 }
2295 
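// Illustrative example: with a fixed VF of 4 and UF of 2, the scalar value
// produced for part P and lane L is ScalarIV + (P * 4 + L) * Step, using the
// FP opcodes from the induction descriptor for floating-point inductions.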
2296 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2297                                            Instruction *EntryVal,
2298                                            const InductionDescriptor &ID,
2299                                            VPValue *Def, VPValue *CastDef,
2300                                            VPTransformState &State) {
2301   // We shouldn't have to build scalar steps if we aren't vectorizing.
2302   assert(VF.isVector() && "VF should be greater than one");
2303   // Get the value type and ensure it and the step have the same integer type.
2304   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2305   assert(ScalarIVTy == Step->getType() &&
2306          "Val and Step should have the same type");
2307 
2308   // We build scalar steps for both integer and floating-point induction
2309   // variables. Here, we determine the kind of arithmetic we will perform.
2310   Instruction::BinaryOps AddOp;
2311   Instruction::BinaryOps MulOp;
2312   if (ScalarIVTy->isIntegerTy()) {
2313     AddOp = Instruction::Add;
2314     MulOp = Instruction::Mul;
2315   } else {
2316     AddOp = ID.getInductionOpcode();
2317     MulOp = Instruction::FMul;
2318   }
2319 
2320   // Determine the number of scalars we need to generate for each unroll
2321   // iteration. If EntryVal is uniform, we only need to generate the first
2322   // lane. Otherwise, we generate all VF values.
2323   unsigned Lanes =
2324       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2325           ? 1
2326           : VF.getKnownMinValue();
2327   assert((!VF.isScalable() || Lanes == 1) &&
2328          "Should never scalarize a scalable vector");
2329   // Compute the scalar steps and save the results in State.
2330   for (unsigned Part = 0; Part < UF; ++Part) {
2331     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2332       auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2333                                          ScalarIVTy->getScalarSizeInBits());
2334       Value *StartIdx =
2335           createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2336       if (ScalarIVTy->isFloatingPointTy())
2337         StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
2338       StartIdx = addFastMathFlag(Builder.CreateBinOp(
2339           AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
2340       // The step returned by `createStepForVF` is a runtime-evaluated value
2341       // when VF is scalable. Otherwise, it should be folded into a Constant.
2342       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2343              "Expected StartIdx to be folded to a constant when VF is not "
2344              "scalable");
2345       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2346       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2347       State.set(Def, Add, VPIteration(Part, Lane));
2348       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2349                                             Part, Lane);
2350     }
2351   }
2352 }
2353 
2354 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2355                                                     const VPIteration &Instance,
2356                                                     VPTransformState &State) {
2357   Value *ScalarInst = State.get(Def, Instance);
2358   Value *VectorValue = State.get(Def, Instance.Part);
2359   VectorValue = Builder.CreateInsertElement(
2360       VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
2361   State.set(Def, VectorValue, Instance.Part);
2362 }
2363 
2364 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2365   assert(Vec->getType()->isVectorTy() && "Invalid type");
2366   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
2367   SmallVector<int, 8> ShuffleMask;
2368   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2369     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2370 
2371   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2372 }
2373 
2374 // Return whether we allow using masked interleave-groups (for dealing with
2375 // strided loads/stores that reside in predicated blocks, or for dealing
2376 // with gaps).
2377 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2378   // If an override option has been passed in for interleaved accesses, use it.
2379   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2380     return EnableMaskedInterleavedMemAccesses;
2381 
2382   return TTI.enableMaskedInterleavedAccessVectorization();
2383 }
2384 
2385 // Try to vectorize the interleave group that \p Instr belongs to.
2386 //
// E.g. Translate the following interleaved load group (factor = 3):
2388 //   for (i = 0; i < N; i+=3) {
2389 //     R = Pic[i];             // Member of index 0
2390 //     G = Pic[i+1];           // Member of index 1
2391 //     B = Pic[i+2];           // Member of index 2
2392 //     ... // do something to R, G, B
2393 //   }
2394 // To:
2395 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2396 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2397 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2398 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2399 //
// Or translate the following interleaved store group (factor = 3):
2401 //   for (i = 0; i < N; i+=3) {
2402 //     ... do something to R, G, B
2403 //     Pic[i]   = R;           // Member of index 0
2404 //     Pic[i+1] = G;           // Member of index 1
2405 //     Pic[i+2] = B;           // Member of index 2
2406 //   }
2407 // To:
2408 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2409 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2410 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2411 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2412 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2413 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2414     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2415     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2416     VPValue *BlockInMask) {
2417   Instruction *Instr = Group->getInsertPos();
2418   const DataLayout &DL = Instr->getModule()->getDataLayout();
2419 
  // Prepare the vector type for the interleaved load/store.
2421   Type *ScalarTy = getMemInstValueType(Instr);
2422   unsigned InterleaveFactor = Group->getFactor();
2423   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2424   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2425 
  // Prepare the new pointers.
2427   SmallVector<Value *, 2> AddrParts;
2428   unsigned Index = Group->getIndex(Instr);
2429 
2430   // TODO: extend the masked interleaved-group support to reversed access.
2431   assert((!BlockInMask || !Group->isReverse()) &&
2432          "Reversed masked interleave-group not supported.");
2433 
2434   // If the group is reverse, adjust the index to refer to the last vector lane
2435   // instead of the first. We adjust the index from the first vector lane,
2436   // rather than directly getting the pointer for lane VF - 1, because the
2437   // pointer operand of the interleaved access is supposed to be uniform. For
2438   // uniform instructions, we're only required to generate a value for the
2439   // first vector lane in each unroll iteration.
2440   assert(!VF.isScalable() &&
2441          "scalable vector reverse operation is not implemented");
2442   if (Group->isReverse())
2443     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2444 
2445   for (unsigned Part = 0; Part < UF; Part++) {
2446     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2447     setDebugLocFromInst(Builder, AddrPart);
2448 
    // Note that the current instruction could be any member of the group. We
    // need to adjust the address to point to the member of index 0.
2451     //
2452     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2453     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2455     //
2456     // E.g.  A[i+1] = a;     // Member of index 1
2457     //       A[i]   = b;     // Member of index 0
2458     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2460 
2461     bool InBounds = false;
2462     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2463       InBounds = gep->isInBounds();
2464     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2465     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2466 
2467     // Cast to the vector pointer type.
2468     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2469     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2470     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2471   }
2472 
2473   setDebugLocFromInst(Builder, Instr);
2474   Value *PoisonVec = PoisonValue::get(VecTy);
2475 
2476   Value *MaskForGaps = nullptr;
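  // When the group has gaps that would otherwise require a scalar epilogue
  // and one is not allowed, build a mask that disables the lanes of the
  // missing members, e.g. <1,1,0, 1,1,0, ...> for a factor-3 group whose
  // member 2 is absent.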
2477   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2478     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2479     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2480     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2481   }
2482 
2483   // Vectorize the interleaved load group.
2484   if (isa<LoadInst>(Instr)) {
2485     // For each unroll part, create a wide load for the group.
2486     SmallVector<Value *, 2> NewLoads;
2487     for (unsigned Part = 0; Part < UF; Part++) {
2488       Instruction *NewLoad;
2489       if (BlockInMask || MaskForGaps) {
2490         assert(useMaskedInterleavedAccesses(*TTI) &&
2491                "masked interleaved groups are not allowed.");
2492         Value *GroupMask = MaskForGaps;
2493         if (BlockInMask) {
2494           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2495           assert(!VF.isScalable() && "scalable vectors not yet supported.");
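          // Replicate each lane of the block mask InterleaveFactor times so it
          // covers the whole tuple, e.g. <0,0,0,1,1,1,2,2,2,3,3,3> for VF = 4
          // and factor 3.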
2496           Value *ShuffledMask = Builder.CreateShuffleVector(
2497               BlockInMaskPart,
2498               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2499               "interleaved.mask");
2500           GroupMask = MaskForGaps
2501                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2502                                                 MaskForGaps)
2503                           : ShuffledMask;
2504         }
2505         NewLoad =
2506             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2507                                      GroupMask, PoisonVec, "wide.masked.vec");
2508       }
2509       else
2510         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2511                                             Group->getAlign(), "wide.vec");
2512       Group->addMetadata(NewLoad);
2513       NewLoads.push_back(NewLoad);
2514     }
2515 
2516     // For each member in the group, shuffle out the appropriate data from the
2517     // wide loads.
2518     unsigned J = 0;
2519     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2520       Instruction *Member = Group->getMember(I);
2521 
2522       // Skip the gaps in the group.
2523       if (!Member)
2524         continue;
2525 
2526       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2527       auto StrideMask =
2528           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2529       for (unsigned Part = 0; Part < UF; Part++) {
2530         Value *StridedVec = Builder.CreateShuffleVector(
2531             NewLoads[Part], StrideMask, "strided.vec");
2532 
        // If this member has a different type, cast the result to that type.
2534         if (Member->getType() != ScalarTy) {
2535           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2536           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2537           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2538         }
2539 
2540         if (Group->isReverse())
2541           StridedVec = reverseVector(StridedVec);
2542 
2543         State.set(VPDefs[J], StridedVec, Part);
2544       }
2545       ++J;
2546     }
2547     return;
2548   }
2549 
  // The sub-vector type for the current instruction.
2551   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2552   auto *SubVT = VectorType::get(ScalarTy, VF);
2553 
2554   // Vectorize the interleaved store group.
2555   for (unsigned Part = 0; Part < UF; Part++) {
2556     // Collect the stored vector from each member.
2557     SmallVector<Value *, 4> StoredVecs;
2558     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2561 
2562       Value *StoredVec = State.get(StoredValues[i], Part);
2563 
2564       if (Group->isReverse())
2565         StoredVec = reverseVector(StoredVec);
2566 
      // If this member has a different type, cast it to a unified type.
2569       if (StoredVec->getType() != SubVT)
2570         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2571 
2572       StoredVecs.push_back(StoredVec);
2573     }
2574 
2575     // Concatenate all vectors into a wide vector.
2576     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2577 
2578     // Interleave the elements in the wide vector.
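    // E.g. for VF = 4 and factor 3 the interleave mask is
    // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>, as in the store example above.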
2579     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2580     Value *IVec = Builder.CreateShuffleVector(
2581         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2582         "interleaved.vec");
2583 
2584     Instruction *NewStoreInstr;
2585     if (BlockInMask) {
2586       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2587       Value *ShuffledMask = Builder.CreateShuffleVector(
2588           BlockInMaskPart,
2589           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2590           "interleaved.mask");
2591       NewStoreInstr = Builder.CreateMaskedStore(
2592           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2593     }
2594     else
2595       NewStoreInstr =
2596           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2597 
2598     Group->addMetadata(NewStoreInstr);
2599   }
2600 }
2601 
2602 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2603     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2604     VPValue *StoredValue, VPValue *BlockInMask) {
2605   // Attempt to issue a wide load.
2606   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2607   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2608 
2609   assert((LI || SI) && "Invalid Load/Store instruction");
2610   assert((!SI || StoredValue) && "No stored value provided for widened store");
2611   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2612 
2613   LoopVectorizationCostModel::InstWidening Decision =
2614       Cost->getWideningDecision(Instr, VF);
2615   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2616           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2617           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2618          "CM decision is not to widen the memory instruction");
2619 
2620   Type *ScalarDataTy = getMemInstValueType(Instr);
2621 
2622   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2623   const Align Alignment = getLoadStoreAlignment(Instr);
2624 
2625   // Determine if the pointer operand of the access is either consecutive or
2626   // reverse consecutive.
2627   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2628   bool ConsecutiveStride =
2629       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2630   bool CreateGatherScatter =
2631       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2632 
2633   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2634   // gather/scatter. Otherwise Decision should have been to Scalarize.
2635   assert((ConsecutiveStride || CreateGatherScatter) &&
2636          "The instruction should be scalarized");
2637   (void)ConsecutiveStride;
2638 
2639   VectorParts BlockInMaskParts(UF);
2640   bool isMaskRequired = BlockInMask;
2641   if (isMaskRequired)
2642     for (unsigned Part = 0; Part < UF; ++Part)
2643       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2644 
2645   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2646     // Calculate the pointer for the specific unroll-part.
2647     GetElementPtrInst *PartPtr = nullptr;
2648 
2649     bool InBounds = false;
2650     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2651       InBounds = gep->isInBounds();
2652 
2653     if (Reverse) {
2654       assert(!VF.isScalable() &&
2655              "Reversing vectors is not yet supported for scalable vectors.");
2656 
2657       // If the address is consecutive but reversed, then the
2658       // wide store needs to start at the last vector element.
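      // E.g. for VF = 4 and Part = 1 the two GEPs below compute
      // Ptr + (-1 * 4) + (1 - 4) = Ptr - 7, so the wide access covers elements
      // Ptr[-7] .. Ptr[-4]; the loaded or stored data is reversed separately.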
2659       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2660           ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
2661       PartPtr->setIsInBounds(InBounds);
2662       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2663           ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
2664       PartPtr->setIsInBounds(InBounds);
2665       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2666         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2667     } else {
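      // For a consecutive (non-reversed) access, unroll part Part starts at
      // Ptr + Part * VF, e.g. Ptr + 4 for Part = 1 with VF = 4.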
2668       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2669       PartPtr = cast<GetElementPtrInst>(
2670           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2671       PartPtr->setIsInBounds(InBounds);
2672     }
2673 
2674     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2675     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2676   };
2677 
2678   // Handle Stores:
2679   if (SI) {
2680     setDebugLocFromInst(Builder, SI);
2681 
2682     for (unsigned Part = 0; Part < UF; ++Part) {
2683       Instruction *NewSI = nullptr;
2684       Value *StoredVal = State.get(StoredValue, Part);
2685       if (CreateGatherScatter) {
2686         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2687         Value *VectorGep = State.get(Addr, Part);
2688         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2689                                             MaskPart);
2690       } else {
2691         if (Reverse) {
2692           // If we store to reverse consecutive memory locations, then we need
2693           // to reverse the order of elements in the stored value.
2694           StoredVal = reverseVector(StoredVal);
2695           // We don't want to update the value in the map as it might be used in
2696           // another expression. So don't call resetVectorValue(StoredVal).
2697         }
2698         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2699         if (isMaskRequired)
2700           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2701                                             BlockInMaskParts[Part]);
2702         else
2703           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2704       }
2705       addMetadata(NewSI, SI);
2706     }
2707     return;
2708   }
2709 
2710   // Handle loads.
2711   assert(LI && "Must have a load instruction");
2712   setDebugLocFromInst(Builder, LI);
2713   for (unsigned Part = 0; Part < UF; ++Part) {
2714     Value *NewLI;
2715     if (CreateGatherScatter) {
2716       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2717       Value *VectorGep = State.get(Addr, Part);
2718       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2719                                          nullptr, "wide.masked.gather");
2720       addMetadata(NewLI, LI);
2721     } else {
2722       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2723       if (isMaskRequired)
2724         NewLI = Builder.CreateMaskedLoad(
2725             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2726             "wide.masked.load");
2727       else
2728         NewLI =
2729             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2730 
      // Add metadata to the load itself, but record the reversed shuffle as
      // the result value.
2732       addMetadata(NewLI, LI);
2733       if (Reverse)
2734         NewLI = reverseVector(NewLI);
2735     }
2736 
2737     State.set(Def, NewLI, Part);
2738   }
2739 }
2740 
2741 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
2742                                                VPUser &User,
2743                                                const VPIteration &Instance,
2744                                                bool IfPredicateInstr,
2745                                                VPTransformState &State) {
2746   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2747 
2748   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2749   // the first lane and part.
2750   if (isa<NoAliasScopeDeclInst>(Instr))
2751     if (!Instance.isFirstIteration())
2752       return;
2753 
2754   setDebugLocFromInst(Builder, Instr);
2755 
  // Does this instruction return a value?
2757   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2758 
2759   Instruction *Cloned = Instr->clone();
2760   if (!IsVoidRetTy)
2761     Cloned->setName(Instr->getName() + ".cloned");
2762 
2763   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2764                                Builder.GetInsertPoint());
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
2767   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2768     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2769     auto InputInstance = Instance;
2770     if (!Operand || !OrigLoop->contains(Operand) ||
2771         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2772       InputInstance.Lane = 0;
2773     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2774     Cloned->setOperand(op, NewOp);
2775   }
2776   addNewMetadata(Cloned, Instr);
2777 
2778   // Place the cloned scalar in the new loop.
2779   Builder.Insert(Cloned);
2780 
2781   State.set(Def, Cloned, Instance);
2782 
  // If we just cloned a new assumption, add it to the assumption cache.
2784   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2785     if (II->getIntrinsicID() == Intrinsic::assume)
2786       AC->registerAssumption(II);
2787 
2788   // End if-block.
2789   if (IfPredicateInstr)
2790     PredicatedInstructions.push_back(Cloned);
2791 }
2792 
2793 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2794                                                       Value *End, Value *Step,
2795                                                       Instruction *DL) {
2796   BasicBlock *Header = L->getHeader();
2797   BasicBlock *Latch = L->getLoopLatch();
2798   // As we're just creating this loop, it's possible no latch exists
2799   // yet. If so, use the header as this will be a single block loop.
2800   if (!Latch)
2801     Latch = Header;
2802 
2803   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2804   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2805   setDebugLocFromInst(Builder, OldInst);
2806   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2807 
2808   Builder.SetInsertPoint(Latch->getTerminator());
2809   setDebugLocFromInst(Builder, OldInst);
2810 
2811   // Create i+1 and fill the PHINode.
2812   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2813   Induction->addIncoming(Start, L->getLoopPreheader());
2814   Induction->addIncoming(Next, Latch);
2815   // Create the compare.
2816   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2817   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
2818 
2819   // Now we have two terminators. Remove the old one from the block.
2820   Latch->getTerminator()->eraseFromParent();
2821 
2822   return Induction;
2823 }
2824 
2825 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2826   if (TripCount)
2827     return TripCount;
2828 
2829   assert(L && "Create Trip Count for null loop.");
2830   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2831   // Find the loop boundaries.
2832   ScalarEvolution *SE = PSE.getSE();
2833   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2834   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2835          "Invalid loop count");
2836 
2837   Type *IdxTy = Legal->getWidestInductionType();
2838   assert(IdxTy && "No type for induction");
2839 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count in that case is if the
  // induction variable was signed, so it cannot overflow, and truncation to
  // the narrower type is therefore legal.
2845   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2846       IdxTy->getPrimitiveSizeInBits())
2847     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2848   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2849 
2850   // Get the total trip count from the count by adding 1.
2851   const SCEV *ExitCount = SE->getAddExpr(
2852       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2853 
2854   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2855 
2856   // Expand the trip count and place the new instructions in the preheader.
2857   // Notice that the pre-header does not change, only the loop body.
2858   SCEVExpander Exp(*SE, DL, "induction");
2859 
2860   // Count holds the overall loop count (N).
2861   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2862                                 L->getLoopPreheader()->getTerminator());
2863 
2864   if (TripCount->getType()->isPointerTy())
2865     TripCount =
2866         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2867                                     L->getLoopPreheader()->getTerminator());
2868 
2869   return TripCount;
2870 }
2871 
2872 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2873   if (VectorTripCount)
2874     return VectorTripCount;
2875 
2876   Value *TC = getOrCreateTripCount(L);
2877   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2878 
2879   Type *Ty = TC->getType();
2880   // This is where we can make the step a runtime constant.
2881   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
2882 
2883   // If the tail is to be folded by masking, round the number of iterations N
2884   // up to a multiple of Step instead of rounding down. This is done by first
2885   // adding Step-1 and then rounding down. Note that it's ok if this addition
2886   // overflows: the vector induction variable will eventually wrap to zero given
2887   // that it starts at zero and its Step is a power of two; the loop will then
2888   // exit, with the last early-exit vector comparison also producing all-true.
2889   if (Cost->foldTailByMasking()) {
2890     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2891            "VF*UF must be a power of 2 when folding tail by masking");
2892     assert(!VF.isScalable() &&
2893            "Tail folding not yet supported for scalable vectors");
2894     TC = Builder.CreateAdd(
2895         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
2896   }
2897 
2898   // Now we need to generate the expression for the part of the loop that the
2899   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2900   // iterations are not required for correctness, or N - Step, otherwise. Step
2901   // is equal to the vectorization factor (number of SIMD elements) times the
2902   // unroll factor (number of SIMD instructions).
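  // E.g. for a trip count of 10 with VF = 4 and UF = 1: without tail folding
  // N % Step is 2, so the vector loop covers 8 iterations and the scalar
  // remainder handles the other 2; with tail folding N was rounded up to 13
  // above, so the vector loop covers 12 (masked) iterations and no scalar
  // remainder runs.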
2903   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2904 
2905   // There are two cases where we need to ensure (at least) the last iteration
2906   // runs in the scalar remainder loop. Thus, if the step evenly divides
2907   // the trip count, we set the remainder to be equal to the step. If the step
2908   // does not evenly divide the trip count, no adjustment is necessary since
2909   // there will already be scalar iterations. Note that the minimum iterations
2910   // check ensures that N >= Step. The cases are:
2911   // 1) If there is a non-reversed interleaved group that may speculatively
2912   //    access memory out-of-bounds.
2913   // 2) If any instruction may follow a conditionally taken exit. That is, if
2914   //    the loop contains multiple exiting blocks, or a single exiting block
2915   //    which is not the latch.
2916   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
2917     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2918     R = Builder.CreateSelect(IsZero, Step, R);
2919   }
2920 
2921   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2922 
2923   return VectorTripCount;
2924 }
2925 
2926 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2927                                                    const DataLayout &DL) {
2928   // Verify that V is a vector type with same number of elements as DstVTy.
2929   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2930   unsigned VF = DstFVTy->getNumElements();
2931   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2933   Type *SrcElemTy = SrcVecTy->getElementType();
2934   Type *DstElemTy = DstFVTy->getElementType();
2935   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2936          "Vector elements must have same size");
2937 
2938   // Do a direct cast if element types are castable.
2939   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2940     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2941   }
  // V cannot be cast directly to the desired vector type. This may happen when
  // V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle it with a two-step cast through an intermediate integer
  // type, i.e. Ptr <-> Int <-> Float.
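  // E.g. with 64-bit pointers, casting <4 x double> to <4 x i64*> goes through
  // <4 x i64>: a bitcast followed by an inttoptr.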
2946   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2947          "Only one type should be a pointer type");
2948   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2949          "Only one type should be a floating point type");
2950   Type *IntTy =
2951       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2952   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2953   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2954   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2955 }
2956 
2957 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2958                                                          BasicBlock *Bypass) {
2959   Value *Count = getOrCreateTripCount(L);
2960   // Reuse existing vector loop preheader for TC checks.
2961   // Note that new preheader block is generated for vector loop.
2962   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2963   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2964 
2965   // Generate code to check if the loop's trip count is less than VF * UF, or
2966   // equal to it in case a scalar epilogue is required; this implies that the
2967   // vector trip count is zero. This check also covers the case where adding one
  // to the backedge-taken count overflowed, leading to an incorrect trip count
2969   // of zero. In this case we will also jump to the scalar loop.
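  // E.g. with VF = 4 and UF = 2 we branch to the scalar loop when the trip
  // count is less than 8, or less than or equal to 8 when a scalar epilogue is
  // required.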
2970   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2971                                           : ICmpInst::ICMP_ULT;
2972 
2973   // If tail is to be folded, vector loop takes care of all iterations.
2974   Value *CheckMinIters = Builder.getFalse();
2975   if (!Cost->foldTailByMasking()) {
2976     Value *Step =
2977         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
2978     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2979   }
2980   // Create new preheader for vector loop.
2981   LoopVectorPreHeader =
2982       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2983                  "vector.ph");
2984 
2985   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2986                                DT->getNode(Bypass)->getIDom()) &&
2987          "TC check is expected to dominate Bypass");
2988 
2989   // Update dominator for Bypass & LoopExit.
2990   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2991   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2992 
2993   ReplaceInstWithInst(
2994       TCCheckBlock->getTerminator(),
2995       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2996   LoopBypassBlocks.push_back(TCCheckBlock);
2997 }
2998 
2999 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3000   // Reuse existing vector loop preheader for SCEV checks.
3001   // Note that new preheader block is generated for vector loop.
3002   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
3003 
  // Generate the code to check the SCEV assumptions that we made.
3005   // We want the new basic block to start at the first instruction in a
3006   // sequence of instructions that form a check.
3007   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3008                    "scev.check");
3009   Value *SCEVCheck = Exp.expandCodeForPredicate(
3010       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
3011 
3012   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3013     if (C->isZero())
3014       return;
3015 
3016   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3017            (OptForSizeBasedOnProfile &&
3018             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3019          "Cannot SCEV check stride or overflow when optimizing for size");
3020 
3021   SCEVCheckBlock->setName("vector.scevcheck");
3022   // Create new preheader for vector loop.
3023   LoopVectorPreHeader =
3024       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
3025                  nullptr, "vector.ph");
3026 
  // Update dominator only if this is the first RT check.
3028   if (LoopBypassBlocks.empty()) {
3029     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3030     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3031   }
3032 
3033   ReplaceInstWithInst(
3034       SCEVCheckBlock->getTerminator(),
3035       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
3036   LoopBypassBlocks.push_back(SCEVCheckBlock);
3037   AddedSafetyChecks = true;
3038 }
3039 
3040 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3041   // VPlan-native path does not do any analysis for runtime checks currently.
3042   if (EnableVPlanNativePath)
3043     return;
3044 
3045   // Reuse existing vector loop preheader for runtime memory checks.
3046   // Note that new preheader block is generated for vector loop.
3047   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
3048 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3052   auto *LAI = Legal->getLAI();
3053   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
3054   if (!RtPtrChecking.Need)
3055     return;
3056 
3057   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3058     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3059            "Cannot emit memory checks when optimizing for size, unless forced "
3060            "to vectorize.");
3061     ORE->emit([&]() {
3062       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3063                                         L->getStartLoc(), L->getHeader())
3064              << "Code-size may be reduced by not forcing "
3065                 "vectorization, or by source-code modifications "
3066                 "eliminating the need for runtime checks "
3067                 "(e.g., adding 'restrict').";
3068     });
3069   }
3070 
3071   MemCheckBlock->setName("vector.memcheck");
3072   // Create new preheader for vector loop.
3073   LoopVectorPreHeader =
3074       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
3075                  "vector.ph");
3076 
3077   auto *CondBranch = cast<BranchInst>(
3078       Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
3079   ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);
3080   LoopBypassBlocks.push_back(MemCheckBlock);
3081   AddedSafetyChecks = true;
3082 
  // Update dominator only if this is the first RT check.
3084   if (LoopBypassBlocks.empty()) {
3085     DT->changeImmediateDominator(Bypass, MemCheckBlock);
3086     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
3087   }
3088 
3089   Instruction *FirstCheckInst;
3090   Instruction *MemRuntimeCheck;
3091   SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(),
3092                    "induction");
3093   std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks(
3094       MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp);
3095   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
3096                             "claimed checks are required");
3097   CondBranch->setCondition(MemRuntimeCheck);
3098 
3099   // We currently don't use LoopVersioning for the actual loop cloning but we
3100   // still use it to add the noalias metadata.
3101   LVer = std::make_unique<LoopVersioning>(
3102       *Legal->getLAI(),
3103       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3104       DT, PSE.getSE());
3105   LVer->prepareNoAliasMetadata();
3106 }
3107 
3108 Value *InnerLoopVectorizer::emitTransformedIndex(
3109     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3110     const InductionDescriptor &ID) const {
3111 
3112   SCEVExpander Exp(*SE, DL, "induction");
3113   auto Step = ID.getStep();
3114   auto StartValue = ID.getStartValue();
3115   assert(Index->getType() == Step->getType() &&
3116          "Index type does not match StepValue type");
3117 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle some trivial
  // cases only.
3124   auto CreateAdd = [&B](Value *X, Value *Y) {
3125     assert(X->getType() == Y->getType() && "Types don't match!");
3126     if (auto *CX = dyn_cast<ConstantInt>(X))
3127       if (CX->isZero())
3128         return Y;
3129     if (auto *CY = dyn_cast<ConstantInt>(Y))
3130       if (CY->isZero())
3131         return X;
3132     return B.CreateAdd(X, Y);
3133   };
3134 
3135   auto CreateMul = [&B](Value *X, Value *Y) {
3136     assert(X->getType() == Y->getType() && "Types don't match!");
3137     if (auto *CX = dyn_cast<ConstantInt>(X))
3138       if (CX->isOne())
3139         return Y;
3140     if (auto *CY = dyn_cast<ConstantInt>(Y))
3141       if (CY->isOne())
3142         return X;
3143     return B.CreateMul(X, Y);
3144   };
3145 
3146   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3147   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3148   // the DomTree is not kept up-to-date for additional blocks generated in the
3149   // vector loop. By using the header as insertion point, we guarantee that the
3150   // expanded instructions dominate all their uses.
3151   auto GetInsertPoint = [this, &B]() {
3152     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3153     if (InsertBB != LoopVectorBody &&
3154         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3155       return LoopVectorBody->getTerminator();
3156     return &*B.GetInsertPoint();
3157   };
3158   switch (ID.getKind()) {
3159   case InductionDescriptor::IK_IntInduction: {
3160     assert(Index->getType() == StartValue->getType() &&
3161            "Index type does not match StartValue type");
3162     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3163       return B.CreateSub(StartValue, Index);
3164     auto *Offset = CreateMul(
3165         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3166     return CreateAdd(StartValue, Offset);
3167   }
3168   case InductionDescriptor::IK_PtrInduction: {
3169     assert(isa<SCEVConstant>(Step) &&
3170            "Expected constant step for pointer induction");
3171     return B.CreateGEP(
3172         StartValue->getType()->getPointerElementType(), StartValue,
3173         CreateMul(Index,
3174                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3175   }
3176   case InductionDescriptor::IK_FpInduction: {
3177     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3178     auto InductionBinOp = ID.getInductionBinOp();
3179     assert(InductionBinOp &&
3180            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3181             InductionBinOp->getOpcode() == Instruction::FSub) &&
3182            "Original bin op should be defined for FP induction");
3183 
3184     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3185 
3186     // Floating point operations had to be 'fast' to enable the induction.
3187     FastMathFlags Flags;
3188     Flags.setFast();
3189 
3190     Value *MulExp = B.CreateFMul(StepValue, Index);
3191     if (isa<Instruction>(MulExp))
      // We have to check because MulExp may be a constant.
3193       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
3194 
3195     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3196                                "induction");
3197     if (isa<Instruction>(BOp))
3198       cast<Instruction>(BOp)->setFastMathFlags(Flags);
3199 
3200     return BOp;
3201   }
3202   case InductionDescriptor::IK_NoInduction:
3203     return nullptr;
3204   }
3205   llvm_unreachable("invalid enum");
3206 }
3207 
3208 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3209   LoopScalarBody = OrigLoop->getHeader();
3210   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3211   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3212   assert(LoopExitBlock && "Must have an exit block");
3213   assert(LoopVectorPreHeader && "Invalid loop structure");
3214 
3215   LoopMiddleBlock =
3216       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3217                  LI, nullptr, Twine(Prefix) + "middle.block");
3218   LoopScalarPreHeader =
3219       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3220                  nullptr, Twine(Prefix) + "scalar.ph");
3221 
3222   // Set up branch from middle block to the exit and scalar preheader blocks.
3223   // completeLoopSkeleton will update the condition to use an iteration check,
3224   // if required to decide whether to execute the remainder.
3225   BranchInst *BrInst =
3226       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3227   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3228   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3229   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3230 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3234   LoopVectorBody =
3235       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3236                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3237 
3238   // Update dominator for loop exit.
3239   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3240 
3241   // Create and register the new vector loop.
3242   Loop *Lp = LI->AllocateLoop();
3243   Loop *ParentLoop = OrigLoop->getParentLoop();
3244 
3245   // Insert the new loop into the loop nest and register the new basic blocks
3246   // before calling any utilities such as SCEV that require valid LoopInfo.
3247   if (ParentLoop) {
3248     ParentLoop->addChildLoop(Lp);
3249   } else {
3250     LI->addTopLevelLoop(Lp);
3251   }
3252   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3253   return Lp;
3254 }
3255 
3256 void InnerLoopVectorizer::createInductionResumeValues(
3257     Loop *L, Value *VectorTripCount,
3258     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3259   assert(VectorTripCount && L && "Expected valid arguments");
3260   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3261           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3262          "Inconsistent information about additional bypass.");
3263   // We are going to resume the execution of the scalar loop.
3264   // Go over all of the induction variables that we found and fix the
3265   // PHIs that are left in the scalar version of the loop.
3266   // The starting values of PHI nodes depend on the counter of the last
3267   // iteration in the vectorized loop.
3268   // If we come from a bypass edge then we need to start from the original
3269   // start value.
3270   for (auto &InductionEntry : Legal->getInductionVars()) {
3271     PHINode *OrigPhi = InductionEntry.first;
3272     InductionDescriptor II = InductionEntry.second;
3273 
    // Create phi nodes to merge from the backedge-taken check block.
3275     PHINode *BCResumeVal =
3276         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3277                         LoopScalarPreHeader->getTerminator());
3278     // Copy original phi DL over to the new one.
3279     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3280     Value *&EndValue = IVEndValues[OrigPhi];
3281     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3282     if (OrigPhi == OldInduction) {
3283       // We know what the end value is.
3284       EndValue = VectorTripCount;
3285     } else {
3286       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3287       Type *StepType = II.getStep()->getType();
3288       Instruction::CastOps CastOp =
3289           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3290       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3291       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3292       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3293       EndValue->setName("ind.end");
3294 
3295       // Compute the end value for the additional bypass (if applicable).
3296       if (AdditionalBypass.first) {
3297         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3298         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3299                                          StepType, true);
3300         CRD =
3301             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3302         EndValueFromAdditionalBypass =
3303             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3304         EndValueFromAdditionalBypass->setName("ind.end");
3305       }
3306     }
3307     // The new PHI merges the original incoming value, in case of a bypass,
3308     // or the value at the end of the vectorized loop.
3309     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3310 
3311     // Fix the scalar body counter (PHI node).
3312     // The old induction's phi node in the scalar body needs the truncated
3313     // value.
3314     for (BasicBlock *BB : LoopBypassBlocks)
3315       BCResumeVal->addIncoming(II.getStartValue(), BB);
3316 
3317     if (AdditionalBypass.first)
3318       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3319                                             EndValueFromAdditionalBypass);
3320 
3321     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3322   }
3323 }
3324 
3325 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3326                                                       MDNode *OrigLoopID) {
3327   assert(L && "Expected valid loop.");
3328 
3329   // The trip counts should be cached by now.
3330   Value *Count = getOrCreateTripCount(L);
3331   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3332 
3333   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3334 
3335   // Add a check in the middle block to see if we have completed
3336   // all of the iterations in the first vector loop.
3337   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3338   // If tail is to be folded, we know we don't need to run the remainder.
3339   if (!Cost->foldTailByMasking()) {
3340     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3341                                         Count, VectorTripCount, "cmp.n",
3342                                         LoopMiddleBlock->getTerminator());
3343 
3344     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3345     // of the corresponding compare because they may have ended up with
3346     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g. if the compare has a line number inside the loop.
3348     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3349     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3350   }
3351 
3352   // Get ready to start creating new instructions into the vectorized body.
3353   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3354          "Inconsistent vector loop preheader");
3355   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3356 
3357   Optional<MDNode *> VectorizedLoopID =
3358       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3359                                       LLVMLoopVectorizeFollowupVectorized});
3360   if (VectorizedLoopID.hasValue()) {
3361     L->setLoopID(VectorizedLoopID.getValue());
3362 
3363     // Do not setAlreadyVectorized if loop attributes have been defined
3364     // explicitly.
3365     return LoopVectorPreHeader;
3366   }
3367 
3368   // Keep all loop hints from the original loop on the vector loop (we'll
3369   // replace the vectorizer-specific hints below).
3370   if (MDNode *LID = OrigLoop->getLoopID())
3371     L->setLoopID(LID);
3372 
3373   LoopVectorizeHints Hints(L, true, *ORE);
3374   Hints.setAlreadyVectorized();
3375 
3376 #ifdef EXPENSIVE_CHECKS
3377   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3378   LI->verify(*DT);
3379 #endif
3380 
3381   return LoopVectorPreHeader;
3382 }
3383 
3384 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3385   /*
3386    In this function we generate a new loop. The new loop will contain
3387    the vectorized instructions while the old loop will continue to run the
3388    scalar remainder.
3389 
3390        [ ] <-- loop iteration number check.
3391     /   |
3392    /    v
3393   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3394   |  /  |
3395   | /   v
3396   ||   [ ]     <-- vector pre header.
3397   |/    |
3398   |     v
3399   |    [  ] \
3400   |    [  ]_|   <-- vector loop.
3401   |     |
3402   |     v
3403   |   -[ ]   <--- middle-block.
3404   |  /  |
3405   | /   v
3406   -|- >[ ]     <--- new preheader.
3407    |    |
3408    |    v
3409    |   [ ] \
3410    |   [ ]_|   <-- old scalar loop to handle remainder.
3411     \   |
3412      \  v
3413       >[ ]     <-- exit block.
3414    ...
3415    */
3416 
3417   // Get the metadata of the original loop before it gets modified.
3418   MDNode *OrigLoopID = OrigLoop->getLoopID();
3419 
3420   // Create an empty vector loop, and prepare basic blocks for the runtime
3421   // checks.
3422   Loop *Lp = createVectorLoopSkeleton("");
3423 
3424   // Now, compare the new count to zero. If it is zero skip the vector loop and
3425   // jump to the scalar loop. This check also covers the case where the
3426   // backedge-taken count is uint##_max: adding one to it will overflow leading
3427   // to an incorrect trip count of zero. In this (rare) case we will also jump
3428   // to the scalar loop.
3429   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3430 
3431   // Generate the code to check any assumptions that we've made for SCEV
3432   // expressions.
3433   emitSCEVChecks(Lp, LoopScalarPreHeader);
3434 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3438   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3439 
3440   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often have multiple pointer
3442   // induction variables. In the code below we also support a case where we
3443   // don't have a single induction variable.
3444   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
3447   //   - is an integer
3448   //   - counts from zero, stepping by one
3449   //   - is the size of the widest induction variable type
3450   // then we create a new one.
3451   OldInduction = Legal->getPrimaryInduction();
3452   Type *IdxTy = Legal->getWidestInductionType();
3453   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3454   // The loop step is equal to the vectorization factor (num of SIMD elements)
3455   // times the unroll factor (num of SIMD instructions).
3456   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3457   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3458   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3459   Induction =
3460       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3461                               getDebugLocFromInstOrOperands(OldInduction));
3462 
3463   // Emit phis for the new starting index of the scalar loop.
3464   createInductionResumeValues(Lp, CountRoundDown);
3465 
3466   return completeLoopSkeleton(Lp, OrigLoopID);
3467 }
3468 
3469 // Fix up external users of the induction variable. At this point, we are
3470 // in LCSSA form, with all external PHIs that use the IV having one input value,
3471 // coming from the remainder loop. We need those PHIs to also have a correct
3472 // value for the IV when arriving directly from the middle block.
3473 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3474                                        const InductionDescriptor &II,
3475                                        Value *CountRoundDown, Value *EndValue,
3476                                        BasicBlock *MiddleBlock) {
3477   // There are two kinds of external IV usages - those that use the value
3478   // computed in the last iteration (the PHI) and those that use the penultimate
3479   // value (the value that feeds into the phi from the loop latch).
3480   // We allow both, but they, obviously, have different values.
3481 
3482   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3483 
3484   DenseMap<Value *, Value *> MissingVals;
3485 
3486   // An external user of the last iteration's value should see the value that
3487   // the remainder loop uses to initialize its own IV.
3488   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3489   for (User *U : PostInc->users()) {
3490     Instruction *UI = cast<Instruction>(U);
3491     if (!OrigLoop->contains(UI)) {
3492       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3493       MissingVals[UI] = EndValue;
3494     }
3495   }
3496 
  // An external user of the penultimate value needs to see EndValue - Step.
3498   // The simplest way to get this is to recompute it from the constituent SCEVs,
3499   // that is Start + (Step * (CRD - 1)).
3500   for (User *U : OrigPhi->users()) {
3501     auto *UI = cast<Instruction>(U);
3502     if (!OrigLoop->contains(UI)) {
3503       const DataLayout &DL =
3504           OrigLoop->getHeader()->getModule()->getDataLayout();
3505       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3506 
3507       IRBuilder<> B(MiddleBlock->getTerminator());
3508       Value *CountMinusOne = B.CreateSub(
3509           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3510       Value *CMO =
3511           !II.getStep()->getType()->isIntegerTy()
3512               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3513                              II.getStep()->getType())
3514               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3515       CMO->setName("cast.cmo");
3516       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3517       Escape->setName("ind.escape");
3518       MissingVals[UI] = Escape;
3519     }
3520   }
3521 
3522   for (auto &I : MissingVals) {
3523     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is, %IV2 = phi [...], [ %IV1, %latch ]
3526     // In this case, if IV1 has an external use, we need to avoid adding both
3527     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3528     // don't already have an incoming value for the middle block.
3529     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3530       PHI->addIncoming(I.second, MiddleBlock);
3531   }
3532 }
3533 
3534 namespace {
3535 
3536 struct CSEDenseMapInfo {
3537   static bool canHandle(const Instruction *I) {
3538     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3539            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3540   }
3541 
3542   static inline Instruction *getEmptyKey() {
3543     return DenseMapInfo<Instruction *>::getEmptyKey();
3544   }
3545 
3546   static inline Instruction *getTombstoneKey() {
3547     return DenseMapInfo<Instruction *>::getTombstoneKey();
3548   }
3549 
3550   static unsigned getHashValue(const Instruction *I) {
3551     assert(canHandle(I) && "Unknown instruction!");
3552     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3553                                                            I->value_op_end()));
3554   }
3555 
3556   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3557     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3558         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3559       return LHS == RHS;
3560     return LHS->isIdenticalTo(RHS);
3561   }
3562 };
3563 
3564 } // end anonymous namespace
3565 
/// Perform CSE of induction variable instructions.
3567 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3569   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3570   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3571     Instruction *In = &*I++;
3572 
3573     if (!CSEDenseMapInfo::canHandle(In))
3574       continue;
3575 
3576     // Check if we can replace this instruction with any of the
3577     // visited instructions.
3578     if (Instruction *V = CSEMap.lookup(In)) {
3579       In->replaceAllUsesWith(V);
3580       In->eraseFromParent();
3581       continue;
3582     }
3583 
3584     CSEMap[In] = In;
3585   }
3586 }
3587 
3588 InstructionCost
3589 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3590                                               bool &NeedToScalarize) {
3591   Function *F = CI->getCalledFunction();
3592   Type *ScalarRetTy = CI->getType();
3593   SmallVector<Type *, 4> Tys, ScalarTys;
3594   for (auto &ArgOp : CI->arg_operands())
3595     ScalarTys.push_back(ArgOp->getType());
3596 
  // Estimate the cost of a scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // them, execute VF scalar calls, and then gather the results into the
  // vector return value.
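  // For example, with VF = 4 the scalarized cost is roughly
  //   4 * ScalarCallCost + ScalarizationCost,
  // where the scalarization overhead covers the per-element extracts of the
  // vector arguments and the inserts that rebuild the vector return value.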
3601   InstructionCost ScalarCallCost =
3602       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3603   if (VF.isScalar())
3604     return ScalarCallCost;
3605 
3606   // Compute corresponding vector type for return value and arguments.
3607   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3608   for (Type *ScalarTy : ScalarTys)
3609     Tys.push_back(ToVectorTy(ScalarTy, VF));
3610 
3611   // Compute costs of unpacking argument values for the scalar calls and
3612   // packing the return values to a vector.
3613   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3614 
3615   InstructionCost Cost =
3616       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3617 
3618   // If we can't emit a vector call for this function, then the currently found
3619   // cost is the cost we need to return.
3620   NeedToScalarize = true;
3621   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3622   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3623 
3624   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3625     return Cost;
3626 
3627   // If the corresponding vector cost is cheaper, return its cost.
3628   InstructionCost VectorCallCost =
3629       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3630   if (VectorCallCost < Cost) {
3631     NeedToScalarize = false;
3632     Cost = VectorCallCost;
3633   }
3634   return Cost;
3635 }
3636 
3637 InstructionCost
3638 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3639                                                    ElementCount VF) {
3640   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3641   assert(ID && "Expected intrinsic call!");
3642 
3643   IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3644   return TTI.getIntrinsicInstrCost(CostAttrs,
3645                                    TargetTransformInfo::TCK_RecipThroughput);
3646 }
3647 
3648 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3649   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3650   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3651   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3652 }
3653 
3654 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3655   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3656   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3657   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3658 }
3659 
3660 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3661   // For every instruction `I` in MinBWs, truncate the operands, create a
3662   // truncated version of `I` and reextend its result. InstCombine runs
3663   // later and will remove any ext/trunc pairs.
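  // For example, an i32 add whose result is known to need only 8 bits is
  // rewritten (for VF = 4) roughly as:
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %r.tr = add <4 x i8> %a.tr, %b.tr
  //   %r    = zext <4 x i8> %r.tr to <4 x i32>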
3664   SmallPtrSet<Value *, 4> Erased;
3665   for (const auto &KV : Cost->getMinimalBitwidths()) {
3666     // If the value wasn't vectorized, we must maintain the original scalar
3667     // type. The absence of the value from State indicates that it
3668     // wasn't vectorized.
3669     VPValue *Def = State.Plan->getVPValue(KV.first);
3670     if (!State.hasAnyVectorValue(Def))
3671       continue;
3672     for (unsigned Part = 0; Part < UF; ++Part) {
3673       Value *I = State.get(Def, Part);
3674       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3675         continue;
3676       Type *OriginalTy = I->getType();
3677       Type *ScalarTruncatedTy =
3678           IntegerType::get(OriginalTy->getContext(), KV.second);
3679       auto *TruncatedTy = FixedVectorType::get(
3680           ScalarTruncatedTy,
3681           cast<FixedVectorType>(OriginalTy)->getNumElements());
3682       if (TruncatedTy == OriginalTy)
3683         continue;
3684 
3685       IRBuilder<> B(cast<Instruction>(I));
3686       auto ShrinkOperand = [&](Value *V) -> Value * {
3687         if (auto *ZI = dyn_cast<ZExtInst>(V))
3688           if (ZI->getSrcTy() == TruncatedTy)
3689             return ZI->getOperand(0);
3690         return B.CreateZExtOrTrunc(V, TruncatedTy);
3691       };
3692 
3693       // The actual instruction modification depends on the instruction type,
3694       // unfortunately.
3695       Value *NewI = nullptr;
3696       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3697         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3698                              ShrinkOperand(BO->getOperand(1)));
3699 
3700         // Any wrapping introduced by shrinking this operation shouldn't be
3701         // considered undefined behavior. So, we can't unconditionally copy
3702         // arithmetic wrapping flags to NewI.
3703         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3704       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3705         NewI =
3706             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3707                          ShrinkOperand(CI->getOperand(1)));
3708       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3709         NewI = B.CreateSelect(SI->getCondition(),
3710                               ShrinkOperand(SI->getTrueValue()),
3711                               ShrinkOperand(SI->getFalseValue()));
3712       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3713         switch (CI->getOpcode()) {
3714         default:
3715           llvm_unreachable("Unhandled cast!");
3716         case Instruction::Trunc:
3717           NewI = ShrinkOperand(CI->getOperand(0));
3718           break;
3719         case Instruction::SExt:
3720           NewI = B.CreateSExtOrTrunc(
3721               CI->getOperand(0),
3722               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3723           break;
3724         case Instruction::ZExt:
3725           NewI = B.CreateZExtOrTrunc(
3726               CI->getOperand(0),
3727               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3728           break;
3729         }
3730       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3731         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3732                              ->getNumElements();
3733         auto *O0 = B.CreateZExtOrTrunc(
3734             SI->getOperand(0),
3735             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3736         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3737                              ->getNumElements();
3738         auto *O1 = B.CreateZExtOrTrunc(
3739             SI->getOperand(1),
3740             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3741 
3742         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3743       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3744         // Don't do anything with the operands, just extend the result.
3745         continue;
3746       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3747         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3748                             ->getNumElements();
3749         auto *O0 = B.CreateZExtOrTrunc(
3750             IE->getOperand(0),
3751             FixedVectorType::get(ScalarTruncatedTy, Elements));
3752         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3753         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3754       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3755         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3756                             ->getNumElements();
3757         auto *O0 = B.CreateZExtOrTrunc(
3758             EE->getOperand(0),
3759             FixedVectorType::get(ScalarTruncatedTy, Elements));
3760         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3761       } else {
3762         // If we don't know what to do, be conservative and don't do anything.
3763         continue;
3764       }
3765 
3766       // Lastly, extend the result.
3767       NewI->takeName(cast<Instruction>(I));
3768       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3769       I->replaceAllUsesWith(Res);
3770       cast<Instruction>(I)->eraseFromParent();
3771       Erased.insert(I);
3772       State.reset(Def, Res, Part);
3773     }
3774   }
3775 
  // The truncation above may have left behind zext instructions that no
  // longer have any users. Clean them up.
3777   for (const auto &KV : Cost->getMinimalBitwidths()) {
3778     // If the value wasn't vectorized, we must maintain the original scalar
3779     // type. The absence of the value from State indicates that it
3780     // wasn't vectorized.
3781     VPValue *Def = State.Plan->getVPValue(KV.first);
3782     if (!State.hasAnyVectorValue(Def))
3783       continue;
3784     for (unsigned Part = 0; Part < UF; ++Part) {
3785       Value *I = State.get(Def, Part);
3786       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3787       if (Inst && Inst->use_empty()) {
3788         Value *NewI = Inst->getOperand(0);
3789         Inst->eraseFromParent();
3790         State.reset(Def, NewI, Part);
3791       }
3792     }
3793   }
3794 }
3795 
3796 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3797   // Insert truncates and extends for any truncated instructions as hints to
3798   // InstCombine.
3799   if (VF.isVector())
3800     truncateToMinimalBitwidths(State);
3801 
3802   // Fix widened non-induction PHIs by setting up the PHI operands.
3803   if (OrigPHIsToFix.size()) {
3804     assert(EnableVPlanNativePath &&
3805            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3806     fixNonInductionPHIs(State);
3807   }
3808 
3809   // At this point every instruction in the original loop is widened to a
3810   // vector form. Now we need to fix the recurrences in the loop. These PHI
3811   // nodes are currently empty because we did not want to introduce cycles.
3812   // This is the second stage of vectorizing recurrences.
3813   fixCrossIterationPHIs(State);
3814 
3815   // Forget the original basic block.
3816   PSE.getSE()->forgetLoop(OrigLoop);
3817 
3818   // Fix-up external users of the induction variables.
3819   for (auto &Entry : Legal->getInductionVars())
3820     fixupIVUsers(Entry.first, Entry.second,
3821                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3822                  IVEndValues[Entry.first], LoopMiddleBlock);
3823 
3824   fixLCSSAPHIs(State);
3825   for (Instruction *PI : PredicatedInstructions)
3826     sinkScalarOperands(&*PI);
3827 
3828   // Remove redundant induction instructions.
3829   cse(LoopVectorBody);
3830 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
3844   setProfileInfoAfterUnrolling(
3845       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
3846       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
3847 }
3848 
3849 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3850   // In order to support recurrences we need to be able to vectorize Phi nodes.
3851   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3852   // stage #2: We now need to fix the recurrences by adding incoming edges to
3853   // the currently empty PHI nodes. At this point every instruction in the
3854   // original loop is widened to a vector form so we can use them to construct
3855   // the incoming edges.
3856   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3857     // Handle first-order recurrences and reductions that need to be fixed.
3858     if (Legal->isFirstOrderRecurrence(&Phi))
3859       fixFirstOrderRecurrence(&Phi, State);
3860     else if (Legal->isReductionVariable(&Phi))
3861       fixReduction(&Phi, State);
3862   }
3863 }
3864 
3865 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
3866                                                   VPTransformState &State) {
3867   // This is the second phase of vectorizing first-order recurrences. An
3868   // overview of the transformation is described below. Suppose we have the
3869   // following loop.
3870   //
3871   //   for (int i = 0; i < n; ++i)
3872   //     b[i] = a[i] - a[i - 1];
3873   //
3874   // There is a first-order recurrence on "a". For this loop, the shorthand
3875   // scalar IR looks like:
3876   //
3877   //   scalar.ph:
3878   //     s_init = a[-1]
3879   //     br scalar.body
3880   //
3881   //   scalar.body:
3882   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3883   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3884   //     s2 = a[i]
3885   //     b[i] = s2 - s1
3886   //     br cond, scalar.body, ...
3887   //
  // In this example, s1 is a recurrence because its value depends on the
3889   // previous iteration. In the first phase of vectorization, we created a
3890   // temporary value for s1. We now complete the vectorization and produce the
3891   // shorthand vector IR shown below (for VF = 4, UF = 1).
3892   //
3893   //   vector.ph:
3894   //     v_init = vector(..., ..., ..., a[-1])
3895   //     br vector.body
3896   //
3897   //   vector.body
3898   //     i = phi [0, vector.ph], [i+4, vector.body]
3899   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3900   //     v2 = a[i, i+1, i+2, i+3];
3901   //     v3 = vector(v1(3), v2(0, 1, 2))
3902   //     b[i, i+1, i+2, i+3] = v2 - v3
3903   //     br cond, vector.body, middle.block
3904   //
3905   //   middle.block:
3906   //     x = v2(3)
3907   //     br scalar.ph
3908   //
3909   //   scalar.ph:
3910   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3911   //     br scalar.body
3912   //
  // After the vector loop completes, we extract the next value of the
  // recurrence (x) to use as the initial value in the scalar loop.
3915 
3916   // Get the original loop preheader and single loop latch.
3917   auto *Preheader = OrigLoop->getLoopPreheader();
3918   auto *Latch = OrigLoop->getLoopLatch();
3919 
3920   // Get the initial and previous values of the scalar recurrence.
3921   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3922   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3923 
3924   // Create a vector from the initial value.
3925   auto *VectorInit = ScalarInit;
3926   if (VF.isVector()) {
3927     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3928     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
3929     VectorInit = Builder.CreateInsertElement(
3930         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3931         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
3932   }
3933 
3934   VPValue *PhiDef = State.Plan->getVPValue(Phi);
3935   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
3936   // We constructed a temporary phi node in the first phase of vectorization.
3937   // This phi node will eventually be deleted.
3938   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
3939 
3940   // Create a phi node for the new recurrence. The current value will either be
3941   // the initial value inserted into a vector or loop-varying vector value.
3942   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3943   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3944 
3945   // Get the vectorized previous value of the last part UF - 1. It appears last
3946   // among all unrolled iterations, due to the order of their construction.
3947   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
3948 
3949   // Find and set the insertion point after the previous value if it is an
3950   // instruction.
3951   BasicBlock::iterator InsertPt;
3952   // Note that the previous value may have been constant-folded so it is not
3953   // guaranteed to be an instruction in the vector loop.
3954   // FIXME: Loop invariant values do not form recurrences. We should deal with
3955   //        them earlier.
3956   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3957     InsertPt = LoopVectorBody->getFirstInsertionPt();
3958   else {
3959     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3960     if (isa<PHINode>(PreviousLastPart))
3961       // If the previous value is a phi node, we should insert after all the phi
3962       // nodes in the block containing the PHI to avoid breaking basic block
3963       // verification. Note that the basic block may be different to
3964       // LoopVectorBody, in case we predicate the loop.
3965       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3966     else
3967       InsertPt = ++PreviousInst->getIterator();
3968   }
3969   Builder.SetInsertPoint(&*InsertPt);
3970 
3971   // We will construct a vector for the recurrence by combining the values for
3972   // the current and previous iterations. This is the required shuffle mask.
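  // For example, for VF = 4 the mask is <3, 4, 5, 6>: lane 0 takes the last
  // element of the previous iteration's vector and lanes 1-3 take the first
  // three elements of the current iteration's vector.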
3973   assert(!VF.isScalable());
3974   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
3975   ShuffleMask[0] = VF.getKnownMinValue() - 1;
3976   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
3977     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
3978 
3979   // The vector from which to take the initial value for the current iteration
3980   // (actual or unrolled). Initially, this is the vector phi node.
3981   Value *Incoming = VecPhi;
3982 
3983   // Shuffle the current and previous vector and update the vector parts.
3984   for (unsigned Part = 0; Part < UF; ++Part) {
3985     Value *PreviousPart = State.get(PreviousDef, Part);
3986     Value *PhiPart = State.get(PhiDef, Part);
3987     auto *Shuffle =
3988         VF.isVector()
3989             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
3990             : Incoming;
3991     PhiPart->replaceAllUsesWith(Shuffle);
3992     cast<Instruction>(PhiPart)->eraseFromParent();
3993     State.reset(PhiDef, Shuffle, Part);
3994     Incoming = PreviousPart;
3995   }
3996 
3997   // Fix the latch value of the new recurrence in the vector loop.
3998   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3999 
4000   // Extract the last vector element in the middle block. This will be the
4001   // initial value for the recurrence when jumping to the scalar loop.
4002   auto *ExtractForScalar = Incoming;
4003   if (VF.isVector()) {
4004     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4005     ExtractForScalar = Builder.CreateExtractElement(
4006         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4007         "vector.recur.extract");
4008   }
4009   // Extract the second last element in the middle block if the
4010   // Phi is used outside the loop. We need to extract the phi itself
4011   // and not the last element (the phi update in the current iteration). This
4012   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4013   // when the scalar loop is not run at all.
4014   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4015   if (VF.isVector())
4016     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4017         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4018         "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing (VF = 1, UF > 1), initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`. This is analogous to the vectorized case above: extracting the
  // second-to-last element when VF > 1.
4023   else if (UF > 1)
4024     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4025 
4026   // Fix the initial value of the original recurrence in the scalar loop.
4027   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4028   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4029   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4030     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4031     Start->addIncoming(Incoming, BB);
4032   }
4033 
4034   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4035   Phi->setName("scalar.recur");
4036 
4037   // Finally, fix users of the recurrence outside the loop. The users will need
4038   // either the last value of the scalar recurrence or the last value of the
4039   // vector recurrence we extracted in the middle block. Since the loop is in
4040   // LCSSA form, we just need to find all the phi nodes for the original scalar
4041   // recurrence in the exit block, and then add an edge for the middle block.
4042   // Note that LCSSA does not imply single entry when the original scalar loop
4043   // had multiple exiting edges (as we always run the last iteration in the
4044   // scalar epilogue); in that case, the exiting path through middle will be
4045   // dynamically dead and the value picked for the phi doesn't matter.
4046   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4047     if (any_of(LCSSAPhi.incoming_values(),
4048                [Phi](Value *V) { return V == Phi; }))
4049       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4050 }
4051 
4052 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
  // Get its reduction variable descriptor.
4054   assert(Legal->isReductionVariable(Phi) &&
4055          "Unable to find the reduction variable");
4056   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4057 
4058   RecurKind RK = RdxDesc.getRecurrenceKind();
4059   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4060   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4061   setDebugLocFromInst(Builder, ReductionStartValue);
4062   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4063 
4064   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4065   // This is the vector-clone of the value that leaves the loop.
4066   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4067 
4068   // Wrap flags are in general invalid after vectorization, clear them.
4069   clearReductionWrapFlags(RdxDesc, State);
4070 
4071   // Fix the vector-loop phi.
4072 
4073   // Reductions do not have to start at zero. They can start with
4074   // any loop invariant values.
4075   BasicBlock *Latch = OrigLoop->getLoopLatch();
4076   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4077 
4078   for (unsigned Part = 0; Part < UF; ++Part) {
4079     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4080     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4081     cast<PHINode>(VecRdxPhi)
4082       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4083   }
4084 
4085   // Before each round, move the insertion point right between
4086   // the PHIs and the values we are going to write.
4087   // This allows us to write both PHINodes and the extractelement
4088   // instructions.
4089   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4090 
4091   setDebugLocFromInst(Builder, LoopExitInst);
4092 
4093   // If tail is folded by masking, the vector value to leave the loop should be
4094   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4095   // instead of the former. For an inloop reduction the reduction will already
4096   // be predicated, and does not need to be handled here.
4097   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4098     for (unsigned Part = 0; Part < UF; ++Part) {
4099       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4100       Value *Sel = nullptr;
4101       for (User *U : VecLoopExitInst->users()) {
4102         if (isa<SelectInst>(U)) {
4103           assert(!Sel && "Reduction exit feeding two selects");
4104           Sel = U;
4105         } else
4106           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4107       }
4108       assert(Sel && "Reduction exit feeds no select");
4109       State.reset(LoopExitInstDef, Sel, Part);
4110 
4111       // If the target can create a predicated operator for the reduction at no
4112       // extra cost in the loop (for example a predicated vadd), it can be
4113       // cheaper for the select to remain in the loop than be sunk out of it,
4114       // and so use the select value for the phi instead of the old
4115       // LoopExitValue.
4116       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4117       if (PreferPredicatedReductionSelect ||
4118           TTI->preferPredicatedReductionSelect(
4119               RdxDesc.getOpcode(), Phi->getType(),
4120               TargetTransformInfo::ReductionFlags())) {
4121         auto *VecRdxPhi =
4122             cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
4123         VecRdxPhi->setIncomingValueForBlock(
4124             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4125       }
4126     }
4127   }
4128 
4129   // If the vector reduction can be performed in a smaller type, we truncate
4130   // then extend the loop exit value to enable InstCombine to evaluate the
4131   // entire expression in the smaller type.
4132   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4133     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4134     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4135     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4136     Builder.SetInsertPoint(
4137         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4138     VectorParts RdxParts(UF);
4139     for (unsigned Part = 0; Part < UF; ++Part) {
4140       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4141       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4142       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4143                                         : Builder.CreateZExt(Trunc, VecTy);
4144       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4145            UI != RdxParts[Part]->user_end();)
4146         if (*UI != Trunc) {
4147           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4148           RdxParts[Part] = Extnd;
4149         } else {
4150           ++UI;
4151         }
4152     }
4153     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4154     for (unsigned Part = 0; Part < UF; ++Part) {
4155       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4156       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4157     }
4158   }
4159 
4160   // Reduce all of the unrolled parts into a single vector.
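  // For example, with UF = 2 and an integer add reduction, the two unrolled
  // vector parts are first combined with a single vector add ("bin.rdx")
  // before the final horizontal reduction is created below.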
4161   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4162   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4163 
4164   // The middle block terminator has already been assigned a DebugLoc here (the
4165   // OrigLoop's single latch terminator). We want the whole middle block to
4166   // appear to execute on this line because: (a) it is all compiler generated,
4167   // (b) these instructions are always executed after evaluating the latch
4168   // conditional branch, and (c) other passes may add new predecessors which
4169   // terminate on this line. This is the easiest way to ensure we don't
4170   // accidentally cause an extra step back into the loop while debugging.
4171   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4172   {
4173     // Floating-point operations should have some FMF to enable the reduction.
4174     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4175     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4176     for (unsigned Part = 1; Part < UF; ++Part) {
4177       Value *RdxPart = State.get(LoopExitInstDef, Part);
4178       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4179         ReducedPartRdx = Builder.CreateBinOp(
4180             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4181       } else {
4182         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4183       }
4184     }
4185   }
4186 
  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
4189   if (VF.isVector() && !IsInLoopReductionPhi) {
4190     ReducedPartRdx =
4191         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4192     // If the reduction can be performed in a smaller type, we need to extend
4193     // the reduction to the wider type before we branch to the original loop.
4194     if (Phi->getType() != RdxDesc.getRecurrenceType())
4195       ReducedPartRdx =
4196         RdxDesc.isSigned()
4197         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4198         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4199   }
4200 
4201   // Create a phi node that merges control-flow from the backedge-taken check
4202   // block and the middle block.
4203   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4204                                         LoopScalarPreHeader->getTerminator());
4205   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4206     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4207   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4208 
4209   // Now, we need to fix the users of the reduction variable
4210   // inside and outside of the scalar remainder loop.
4211 
4212   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4213   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4215   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4216     if (any_of(LCSSAPhi.incoming_values(),
4217                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4218       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4219 
4220   // Fix the scalar loop reduction variable with the incoming reduction sum
4221   // from the vector body and from the backedge value.
4222   int IncomingEdgeBlockIdx =
4223     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4224   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4225   // Pick the other block.
4226   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4227   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4228   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4229 }
4230 
4231 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
4232                                                   VPTransformState &State) {
4233   RecurKind RK = RdxDesc.getRecurrenceKind();
4234   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4235     return;
4236 
4237   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4238   assert(LoopExitInstr && "null loop exit instruction");
4239   SmallVector<Instruction *, 8> Worklist;
4240   SmallPtrSet<Instruction *, 8> Visited;
4241   Worklist.push_back(LoopExitInstr);
4242   Visited.insert(LoopExitInstr);
4243 
4244   while (!Worklist.empty()) {
4245     Instruction *Cur = Worklist.pop_back_val();
4246     if (isa<OverflowingBinaryOperator>(Cur))
4247       for (unsigned Part = 0; Part < UF; ++Part) {
4248         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4249         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4250       }
4251 
4252     for (User *U : Cur->users()) {
4253       Instruction *UI = cast<Instruction>(U);
4254       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4255           Visited.insert(UI).second)
4256         Worklist.push_back(UI);
4257     }
4258   }
4259 }
4260 
4261 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4262   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4263     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4264       // Some phis were already hand updated by the reduction and recurrence
4265       // code above, leave them alone.
4266       continue;
4267 
4268     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4269     // Non-instruction incoming values will have only one value.
4270     unsigned LastLane = 0;
4271     if (isa<Instruction>(IncomingValue))
4272       LastLane = Cost->isUniformAfterVectorization(
4273                      cast<Instruction>(IncomingValue), VF)
4274                      ? 0
4275                      : VF.getKnownMinValue() - 1;
    assert((!VF.isScalable() || LastLane == 0) &&
           "scalable vectors don't support non-uniform scalars yet");
4278     // Can be a loop invariant incoming value or the last scalar value to be
4279     // extracted from the vectorized loop.
4280     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *LastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue),
                        VPIteration(UF - 1, LastLane));
    LCSSAPhi.addIncoming(LastIncomingValue, LoopMiddleBlock);
4287   }
4288 }
4289 
4290 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4291   // The basic block and loop containing the predicated instruction.
4292   auto *PredBB = PredInst->getParent();
4293   auto *VectorLoop = LI->getLoopFor(PredBB);
4294 
4295   // Initialize a worklist with the operands of the predicated instruction.
4296   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4297 
4298   // Holds instructions that we need to analyze again. An instruction may be
4299   // reanalyzed if we don't yet know if we can sink it or not.
4300   SmallVector<Instruction *, 8> InstsToReanalyze;
4301 
4302   // Returns true if a given use occurs in the predicated block. Phi nodes use
4303   // their operands in their corresponding predecessor blocks.
4304   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4305     auto *I = cast<Instruction>(U.getUser());
4306     BasicBlock *BB = I->getParent();
4307     if (auto *Phi = dyn_cast<PHINode>(I))
4308       BB = Phi->getIncomingBlock(
4309           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4310     return BB == PredBB;
4311   };
4312 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one
  // pass through the worklist fails to sink a single instruction.
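  // For example, a scalarized address computation whose only user is a store
  // already placed in the predicated block can itself be sunk on a later pass
  // over the worklist.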
4317   bool Changed;
4318   do {
4319     // Add the instructions that need to be reanalyzed to the worklist, and
4320     // reset the changed indicator.
4321     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4322     InstsToReanalyze.clear();
4323     Changed = false;
4324 
4325     while (!Worklist.empty()) {
4326       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4327 
4328       // We can't sink an instruction if it is a phi node, is already in the
4329       // predicated block, is not in the loop, or may have side effects.
4330       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4331           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4332         continue;
4333 
4334       // It's legal to sink the instruction if all its uses occur in the
4335       // predicated block. Otherwise, there's nothing to do yet, and we may
4336       // need to reanalyze the instruction.
4337       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4338         InstsToReanalyze.push_back(I);
4339         continue;
4340       }
4341 
      // Move the instruction to the beginning of the predicated block, and
      // add its operands to the worklist.
4344       I->moveBefore(&*PredBB->getFirstInsertionPt());
4345       Worklist.insert(I->op_begin(), I->op_end());
4346 
4347       // The sinking may have enabled other instructions to be sunk, so we will
4348       // need to iterate.
4349       Changed = true;
4350     }
4351   } while (Changed);
4352 }
4353 
4354 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4355   for (PHINode *OrigPhi : OrigPHIsToFix) {
4356     PHINode *NewPhi =
4357         cast<PHINode>(State.get(State.Plan->getVPValue(OrigPhi), 0));
4358     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4359 
4360     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4361         predecessors(OrigPhi->getParent()));
4362     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4363         predecessors(NewPhi->getParent()));
4364     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4365            "Scalar and Vector BB should have the same number of predecessors");
4366 
4367     // The insertion point in Builder may be invalidated by the time we get
4368     // here. Force the Builder insertion point to something valid so that we do
4369     // not run into issues during insertion point restore in
4370     // State::get() calls below.
4371     Builder.SetInsertPoint(NewPhi);
4372 
4373     // The predecessor order is preserved and we can rely on mapping between
4374     // scalar and vector block predecessors.
4375     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4376       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4377 
4378       // When looking up the new scalar/vector values to fix up, use incoming
4379       // values from original phi.
4380       Value *ScIncV =
4381           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4382 
      // Scalar incoming value may need a broadcast.
4384       Value *NewIncV = State.get(State.Plan->getOrAddVPValue(ScIncV), 0);
4385       NewPhi->addIncoming(NewIncV, NewPredBB);
4386     }
4387   }
4388 }
4389 
4390 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4391                                    VPUser &Operands, unsigned UF,
4392                                    ElementCount VF, bool IsPtrLoopInvariant,
4393                                    SmallBitVector &IsIndexLoopInvariant,
4394                                    VPTransformState &State) {
4395   // Construct a vector GEP by widening the operands of the scalar GEP as
4396   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4397   // results in a vector of pointers when at least one operand of the GEP
4398   // is vector-typed. Thus, to keep the representation compact, we only use
4399   // vector-typed operands for loop-varying values.
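  // For example, a GEP with a loop-invariant base pointer and a loop-varying
  // index keeps the base scalar and widens only the index:
  //   %gep = getelementptr i32, i32* %base, <4 x i64> %vec.ind
  // which yields a <4 x i32*> vector of pointers.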
4400 
4401   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4402     // If we are vectorizing, but the GEP has only loop-invariant operands,
4403     // the GEP we build (by only using vector-typed operands for
4404     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4405     // produce a vector of pointers, we need to either arbitrarily pick an
4406     // operand to broadcast, or broadcast a clone of the original GEP.
4407     // Here, we broadcast a clone of the original.
4408     //
4409     // TODO: If at some point we decide to scalarize instructions having
4410     //       loop-invariant operands, this special case will no longer be
4411     //       required. We would add the scalarization decision to
4412     //       collectLoopScalars() and teach getVectorValue() to broadcast
4413     //       the lane-zero scalar value.
4414     auto *Clone = Builder.Insert(GEP->clone());
4415     for (unsigned Part = 0; Part < UF; ++Part) {
4416       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4417       State.set(VPDef, EntryPart, Part);
4418       addMetadata(EntryPart, GEP);
4419     }
4420   } else {
4421     // If the GEP has at least one loop-varying operand, we are sure to
4422     // produce a vector of pointers. But if we are only unrolling, we want
4423     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4424     // produce with the code below will be scalar (if VF == 1) or vector
4425     // (otherwise). Note that for the unroll-only case, we still maintain
4426     // values in the vector mapping with initVector, as we do for other
4427     // instructions.
4428     for (unsigned Part = 0; Part < UF; ++Part) {
4429       // The pointer operand of the new GEP. If it's loop-invariant, we
4430       // won't broadcast it.
4431       auto *Ptr = IsPtrLoopInvariant
4432                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4433                       : State.get(Operands.getOperand(0), Part);
4434 
4435       // Collect all the indices for the new GEP. If any index is
4436       // loop-invariant, we won't broadcast it.
4437       SmallVector<Value *, 4> Indices;
4438       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4439         VPValue *Operand = Operands.getOperand(I);
4440         if (IsIndexLoopInvariant[I - 1])
4441           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4442         else
4443           Indices.push_back(State.get(Operand, Part));
4444       }
4445 
4446       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4447       // but it should be a vector, otherwise.
4448       auto *NewGEP =
4449           GEP->isInBounds()
4450               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4451                                           Indices)
4452               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4453       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4454              "NewGEP is not a pointer vector");
4455       State.set(VPDef, NewGEP, Part);
4456       addMetadata(NewGEP, GEP);
4457     }
4458   }
4459 }
4460 
4461 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4462                                               RecurrenceDescriptor *RdxDesc,
4463                                               Value *StartV, VPValue *Def,
4464                                               VPTransformState &State) {
4465   PHINode *P = cast<PHINode>(PN);
4466   if (EnableVPlanNativePath) {
4467     // Currently we enter here in the VPlan-native path for non-induction
4468     // PHIs where all control flow is uniform. We simply widen these PHIs.
4469     // Create a vector phi with no operands - the vector phi operands will be
4470     // set at the end of vector code generation.
4471     Type *VecTy = (State.VF.isScalar())
4472                       ? PN->getType()
4473                       : VectorType::get(PN->getType(), State.VF);
4474     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4475     State.set(Def, VecPhi, 0);
4476     OrigPHIsToFix.push_back(P);
4477 
4478     return;
4479   }
4480 
4481   assert(PN->getParent() == OrigLoop->getHeader() &&
4482          "Non-header phis should have been handled elsewhere");
4483 
4484   // In order to support recurrences we need to be able to vectorize Phi nodes.
4485   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4486   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4487   // this value when we vectorize all of the instructions that use the PHI.
4488   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4489     Value *Iden = nullptr;
4490     bool ScalarPHI =
4491         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4492     Type *VecTy =
4493         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4494 
4495     if (RdxDesc) {
4496       assert(Legal->isReductionVariable(P) && StartV &&
4497              "RdxDesc should only be set for reduction variables; in that case "
4498              "a StartV is also required");
4499       RecurKind RK = RdxDesc->getRecurrenceKind();
4500       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4502         if (ScalarPHI) {
4503           Iden = StartV;
4504         } else {
4505           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4506           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4507           StartV = Iden =
4508               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4509         }
4510       } else {
4511         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4512             RK, VecTy->getScalarType());
4513         Iden = IdenC;
4514 
4515         if (!ScalarPHI) {
4516           Iden = ConstantVector::getSplat(State.VF, IdenC);
4517           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4518           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4519           Constant *Zero = Builder.getInt32(0);
4520           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4521         }
4522       }
4523     }
4524 
4525     for (unsigned Part = 0; Part < State.UF; ++Part) {
4526       // This is phase one of vectorizing PHIs.
4527       Value *EntryPart = PHINode::Create(
4528           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4529       State.set(Def, EntryPart, Part);
4530       if (StartV) {
4531         // Make sure to add the reduction start value only to the
4532         // first unroll part.
4533         Value *StartVal = (Part == 0) ? StartV : Iden;
4534         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4535       }
4536     }
4537     return;
4538   }
4539 
4540   assert(!Legal->isReductionVariable(P) &&
4541          "reductions should be handled above");
4542 
4543   setDebugLocFromInst(Builder, P);
4544 
4545   // This PHINode must be an induction variable.
4546   // Make sure that we know about it.
4547   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4548 
4549   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4550   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4551 
4552   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4553   // which can be found from the original scalar operations.
4554   switch (II.getKind()) {
4555   case InductionDescriptor::IK_NoInduction:
4556     llvm_unreachable("Unknown induction");
4557   case InductionDescriptor::IK_IntInduction:
4558   case InductionDescriptor::IK_FpInduction:
4559     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4560   case InductionDescriptor::IK_PtrInduction: {
4561     // Handle the pointer induction variable case.
4562     assert(P->getType()->isPointerTy() && "Unexpected type.");
4563 
4564     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4565       // This is the normalized GEP that starts counting at zero.
4566       Value *PtrInd =
4567           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4568       // Determine the number of scalars we need to generate for each unroll
4569       // iteration. If the instruction is uniform, we only need to generate the
4570       // first lane. Otherwise, we generate all VF values.
4571       unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF)
4572                            ? 1
4573                            : State.VF.getKnownMinValue();
4574       for (unsigned Part = 0; Part < UF; ++Part) {
4575         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4576           Constant *Idx = ConstantInt::get(
4577               PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue());
4578           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4579           Value *SclrGep =
4580               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4581           SclrGep->setName("next.gep");
4582           State.set(Def, SclrGep, VPIteration(Part, Lane));
4583         }
4584       }
4585       return;
4586     }
4587     assert(isa<SCEVConstant>(II.getStep()) &&
4588            "Induction step not a SCEV constant!");
4589     Type *PhiType = II.getStep()->getType();
4590 
4591     // Build a pointer phi
4592     Value *ScalarStartValue = II.getStartValue();
4593     Type *ScStValueType = ScalarStartValue->getType();
4594     PHINode *NewPointerPhi =
4595         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4596     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4597 
    // A pointer induction, performed by using a GEP.
4599     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4600     Instruction *InductionLoc = LoopLatch->getTerminator();
4601     const SCEV *ScalarStep = II.getStep();
4602     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4603     Value *ScalarStepValue =
4604         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4605     Value *InductionGEP = GetElementPtrInst::Create(
4606         ScStValueType->getPointerElementType(), NewPointerPhi,
4607         Builder.CreateMul(
4608             ScalarStepValue,
4609             ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)),
4610         "ptr.ind", InductionLoc);
4611     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4612 
4613     // Create UF many actual address geps that use the pointer
4614     // phi as base and a vectorized version of the step value
4615     // (<step*0, ..., step*N>) as offset.
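    // For example, with VF = 4, UF = 2 and a scalar step of S, part 0 uses
    // offsets <0*S, 1*S, 2*S, 3*S> and part 1 uses offsets
    // <4*S, 5*S, 6*S, 7*S> relative to the pointer phi.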
4616     for (unsigned Part = 0; Part < State.UF; ++Part) {
4617       SmallVector<Constant *, 8> Indices;
4618       // Create a vector of consecutive numbers from zero to VF.
4619       for (unsigned i = 0; i < State.VF.getKnownMinValue(); ++i)
4620         Indices.push_back(
4621             ConstantInt::get(PhiType, i + Part * State.VF.getKnownMinValue()));
4622       Constant *StartOffset = ConstantVector::get(Indices);
4623 
4624       Value *GEP = Builder.CreateGEP(
4625           ScStValueType->getPointerElementType(), NewPointerPhi,
4626           Builder.CreateMul(StartOffset,
4627                             Builder.CreateVectorSplat(
4628                                 State.VF.getKnownMinValue(), ScalarStepValue),
4629                             "vector.gep"));
4630       State.set(Def, GEP, Part);
4631     }
4632   }
4633   }
4634 }
4635 
4636 /// A helper function for checking whether an integer division-related
4637 /// instruction may divide by zero (in which case it must be predicated if
4638 /// executed conditionally in the scalar code).
4639 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4640 /// Non-zero divisors that are non compile-time constants will not be
4641 /// converted into multiplication, so we will still end up scalarizing
4642 /// the division, but can do so w/o predication.
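/// For example, 'udiv %x, %n' with a non-constant divisor %n may divide by
/// zero and must be predicated when executed conditionally, whereas
/// 'udiv %x, 7' has a known non-zero divisor and needs no predication.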
4643 static bool mayDivideByZero(Instruction &I) {
4644   assert((I.getOpcode() == Instruction::UDiv ||
4645           I.getOpcode() == Instruction::SDiv ||
4646           I.getOpcode() == Instruction::URem ||
4647           I.getOpcode() == Instruction::SRem) &&
4648          "Unexpected instruction");
4649   Value *Divisor = I.getOperand(1);
4650   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4651   return !CInt || CInt->isZero();
4652 }
4653 
4654 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4655                                            VPUser &User,
4656                                            VPTransformState &State) {
4657   switch (I.getOpcode()) {
4658   case Instruction::Call:
4659   case Instruction::Br:
4660   case Instruction::PHI:
4661   case Instruction::GetElementPtr:
4662   case Instruction::Select:
4663     llvm_unreachable("This instruction is handled by a different recipe.");
4664   case Instruction::UDiv:
4665   case Instruction::SDiv:
4666   case Instruction::SRem:
4667   case Instruction::URem:
4668   case Instruction::Add:
4669   case Instruction::FAdd:
4670   case Instruction::Sub:
4671   case Instruction::FSub:
4672   case Instruction::FNeg:
4673   case Instruction::Mul:
4674   case Instruction::FMul:
4675   case Instruction::FDiv:
4676   case Instruction::FRem:
4677   case Instruction::Shl:
4678   case Instruction::LShr:
4679   case Instruction::AShr:
4680   case Instruction::And:
4681   case Instruction::Or:
4682   case Instruction::Xor: {
4683     // Just widen unops and binops.
4684     setDebugLocFromInst(Builder, &I);
4685 
4686     for (unsigned Part = 0; Part < UF; ++Part) {
4687       SmallVector<Value *, 2> Ops;
4688       for (VPValue *VPOp : User.operands())
4689         Ops.push_back(State.get(VPOp, Part));
4690 
4691       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4692 
4693       if (auto *VecOp = dyn_cast<Instruction>(V))
4694         VecOp->copyIRFlags(&I);
4695 
4696       // Use this vector value for all users of the original instruction.
4697       State.set(Def, V, Part);
4698       addMetadata(V, &I);
4699     }
4700 
4701     break;
4702   }
4703   case Instruction::ICmp:
4704   case Instruction::FCmp: {
4705     // Widen compares. Generate vector compares.
4706     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4707     auto *Cmp = cast<CmpInst>(&I);
4708     setDebugLocFromInst(Builder, Cmp);
4709     for (unsigned Part = 0; Part < UF; ++Part) {
4710       Value *A = State.get(User.getOperand(0), Part);
4711       Value *B = State.get(User.getOperand(1), Part);
4712       Value *C = nullptr;
4713       if (FCmp) {
4714         // Propagate fast math flags.
4715         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4716         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4717         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4718       } else {
4719         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4720       }
4721       State.set(Def, C, Part);
4722       addMetadata(C, &I);
4723     }
4724 
4725     break;
4726   }
4727 
4728   case Instruction::ZExt:
4729   case Instruction::SExt:
4730   case Instruction::FPToUI:
4731   case Instruction::FPToSI:
4732   case Instruction::FPExt:
4733   case Instruction::PtrToInt:
4734   case Instruction::IntToPtr:
4735   case Instruction::SIToFP:
4736   case Instruction::UIToFP:
4737   case Instruction::Trunc:
4738   case Instruction::FPTrunc:
4739   case Instruction::BitCast: {
4740     auto *CI = cast<CastInst>(&I);
4741     setDebugLocFromInst(Builder, CI);
4742 
    // Vectorize casts.
4744     Type *DestTy =
4745         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4746 
4747     for (unsigned Part = 0; Part < UF; ++Part) {
4748       Value *A = State.get(User.getOperand(0), Part);
4749       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4750       State.set(Def, Cast, Part);
4751       addMetadata(Cast, &I);
4752     }
4753     break;
4754   }
4755   default:
4756     // This instruction is not vectorized by simple widening.
4757     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4758     llvm_unreachable("Unhandled instruction!");
4759   } // end of switch.
4760 }
4761 
4762 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4763                                                VPUser &ArgOperands,
4764                                                VPTransformState &State) {
4765   assert(!isa<DbgInfoIntrinsic>(I) &&
4766          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4767   setDebugLocFromInst(Builder, &I);
4768 
4769   Module *M = I.getParent()->getParent()->getParent();
4770   auto *CI = cast<CallInst>(&I);
4771 
4772   SmallVector<Type *, 4> Tys;
4773   for (Value *ArgOperand : CI->arg_operands())
4774     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4775 
4776   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4777 
  // The flag indicates whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether calling the intrinsic
  // is at least as cheap as calling a vector library function.
4781   bool NeedToScalarize = false;
4782   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4783   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4784   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4785   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4786          "Instruction should be scalarized elsewhere.");
4787   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4788          "Cannot have invalid costs while widening");
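  // For example (illustrative only), with VF=4 a call to @llvm.sqrt.f32 may
  // be widened to @llvm.sqrt.v4f32 when the intrinsic cost is no worse than
  // that of an equivalent vector library routine known to the VFDatabase.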
4789 
4790   for (unsigned Part = 0; Part < UF; ++Part) {
4791     SmallVector<Value *, 4> Args;
4792     for (auto &I : enumerate(ArgOperands.operands())) {
4793       // Some intrinsics have a scalar argument - don't replace it with a
4794       // vector.
4795       Value *Arg;
4796       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4797         Arg = State.get(I.value(), Part);
4798       else
4799         Arg = State.get(I.value(), VPIteration(0, 0));
4800       Args.push_back(Arg);
4801     }
4802 
4803     Function *VectorF;
4804     if (UseVectorIntrinsic) {
4805       // Use vector version of the intrinsic.
4806       Type *TysForDecl[] = {CI->getType()};
4807       if (VF.isVector())
4808         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4809       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4810       assert(VectorF && "Can't retrieve vector intrinsic.");
4811     } else {
4812       // Use vector version of the function call.
4813       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4814 #ifndef NDEBUG
4815       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4816              "Can't create vector function.");
4817 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
4830 }
4831 
4832 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
4833                                                  VPUser &Operands,
4834                                                  bool InvariantCond,
4835                                                  VPTransformState &State) {
4836   setDebugLocFromInst(Builder, &I);
4837 
  // The condition can be loop invariant but still defined inside the
4839   // loop. This means that we can't just use the original 'cond' value.
4840   // We have to take the 'vectorized' value and pick the first lane.
4841   // Instcombine will make this a no-op.
4842   auto *InvarCond = InvariantCond
4843                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4844                         : nullptr;
4845 
4846   for (unsigned Part = 0; Part < UF; ++Part) {
4847     Value *Cond =
4848         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
4849     Value *Op0 = State.get(Operands.getOperand(1), Part);
4850     Value *Op1 = State.get(Operands.getOperand(2), Part);
4851     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
4852     State.set(VPDef, Sel, Part);
4853     addMetadata(Sel, &I);
4854   }
4855 }
4856 
4857 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4858   // We should not collect Scalars more than once per VF. Right now, this
4859   // function is called from collectUniformsAndScalars(), which already does
4860   // this check. Collecting Scalars for VF=1 does not make any sense.
4861   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4862          "This function should not be visited twice for the same VF");
4863 
4864   SmallSetVector<Instruction *, 8> Worklist;
4865 
4866   // These sets are used to seed the analysis with pointers used by memory
4867   // accesses that will remain scalar.
4868   SmallSetVector<Instruction *, 8> ScalarPtrs;
4869   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4870   auto *Latch = TheLoop->getLoopLatch();
4871 
4872   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4873   // The pointer operands of loads and stores will be scalar as long as the
4874   // memory access is not a gather or scatter operation. The value operand of a
4875   // store will remain scalar if the store is scalarized.
4876   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4877     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4878     assert(WideningDecision != CM_Unknown &&
4879            "Widening decision should be ready at this moment");
4880     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4881       if (Ptr == Store->getValueOperand())
4882         return WideningDecision == CM_Scalarize;
4883     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
4885     return WideningDecision != CM_GatherScatter;
4886   };
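  // For example (illustrative only), for a consecutive access such as
  //   %gep = getelementptr inbounds i32, i32* %p, i64 %iv
  //   store i32 %val, i32* %gep
  // the pointer %gep remains scalar, whereas a gather/scatter decision keeps
  // a vector of pointers and %gep would not be considered scalar here.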
4887 
4888   // A helper that returns true if the given value is a bitcast or
4889   // getelementptr instruction contained in the loop.
4890   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4891     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4892             isa<GetElementPtrInst>(V)) &&
4893            !TheLoop->isLoopInvariant(V);
4894   };
4895 
4896   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
4897     if (!isa<PHINode>(Ptr) ||
4898         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
4899       return false;
4900     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
4901     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
4902       return false;
4903     return isScalarUse(MemAccess, Ptr);
4904   };
4905 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is a scalar pointer induction of the loop, the pointer and its
  // update are inserted into Worklist. Otherwise, if the use will be a scalar
  // use and the pointer is only used by memory accesses, we place the pointer
  // in ScalarPtrs; if not, the pointer is placed in PossibleNonScalarPtrs.
4911   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4912     if (isScalarPtrInduction(MemAccess, Ptr)) {
4913       Worklist.insert(cast<Instruction>(Ptr));
4914       Instruction *Update = cast<Instruction>(
4915           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
4916       Worklist.insert(Update);
4917       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
4918                         << "\n");
4919       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
4920                         << "\n");
4921       return;
4922     }
4923     // We only care about bitcast and getelementptr instructions contained in
4924     // the loop.
4925     if (!isLoopVaryingBitCastOrGEP(Ptr))
4926       return;
4927 
4928     // If the pointer has already been identified as scalar (e.g., if it was
4929     // also identified as uniform), there's nothing to do.
4930     auto *I = cast<Instruction>(Ptr);
4931     if (Worklist.count(I))
4932       return;
4933 
4934     // If the use of the pointer will be a scalar use, and all users of the
4935     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4936     // place the pointer in PossibleNonScalarPtrs.
4937     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4938           return isa<LoadInst>(U) || isa<StoreInst>(U);
4939         }))
4940       ScalarPtrs.insert(I);
4941     else
4942       PossibleNonScalarPtrs.insert(I);
4943   };
4944 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
4949   //
4950   // (1) Add to the worklist all instructions that have been identified as
4951   // uniform-after-vectorization.
4952   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4953 
4954   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4955   // memory accesses requiring a scalar use. The pointer operands of loads and
4956   // stores will be scalar as long as the memory accesses is not a gather or
4957   // scatter operation. The value operand of a store will remain scalar if the
4958   // store is scalarized.
4959   for (auto *BB : TheLoop->blocks())
4960     for (auto &I : *BB) {
4961       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4962         evaluatePtrUse(Load, Load->getPointerOperand());
4963       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4964         evaluatePtrUse(Store, Store->getPointerOperand());
4965         evaluatePtrUse(Store, Store->getValueOperand());
4966       }
4967     }
4968   for (auto *I : ScalarPtrs)
4969     if (!PossibleNonScalarPtrs.count(I)) {
4970       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4971       Worklist.insert(I);
4972     }
4973 
4974   // Insert the forced scalars.
4975   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4976   // induction variable when the PHI user is scalarized.
4977   auto ForcedScalar = ForcedScalars.find(VF);
4978   if (ForcedScalar != ForcedScalars.end())
4979     for (auto *I : ForcedScalar->second)
4980       Worklist.insert(I);
4981 
4982   // Expand the worklist by looking through any bitcasts and getelementptr
4983   // instructions we've already identified as scalar. This is similar to the
4984   // expansion step in collectLoopUniforms(); however, here we're only
4985   // expanding to include additional bitcasts and getelementptr instructions.
4986   unsigned Idx = 0;
4987   while (Idx != Worklist.size()) {
4988     Instruction *Dst = Worklist[Idx++];
4989     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4990       continue;
4991     auto *Src = cast<Instruction>(Dst->getOperand(0));
4992     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4993           auto *J = cast<Instruction>(U);
4994           return !TheLoop->contains(J) || Worklist.count(J) ||
4995                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4996                   isScalarUse(J, Src));
4997         })) {
4998       Worklist.insert(Src);
4999       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5000     }
5001   }
5002 
5003   // An induction variable will remain scalar if all users of the induction
5004   // variable and induction variable update remain scalar.
5005   for (auto &Induction : Legal->getInductionVars()) {
5006     auto *Ind = Induction.first;
5007     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5008 
5009     // If tail-folding is applied, the primary induction variable will be used
5010     // to feed a vector compare.
5011     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5012       continue;
5013 
5014     // Determine if all users of the induction variable are scalar after
5015     // vectorization.
5016     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5017       auto *I = cast<Instruction>(U);
5018       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5019     });
5020     if (!ScalarInd)
5021       continue;
5022 
5023     // Determine if all users of the induction variable update instruction are
5024     // scalar after vectorization.
5025     auto ScalarIndUpdate =
5026         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5027           auto *I = cast<Instruction>(U);
5028           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5029         });
5030     if (!ScalarIndUpdate)
5031       continue;
5032 
5033     // The induction variable and its update instruction will remain scalar.
5034     Worklist.insert(Ind);
5035     Worklist.insert(IndUpdate);
5036     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5037     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5038                       << "\n");
5039   }
5040 
5041   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5042 }
5043 
5044 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5045                                                          ElementCount VF) {
5046   if (!blockNeedsPredication(I->getParent()))
5047     return false;
5048   switch(I->getOpcode()) {
5049   default:
5050     break;
5051   case Instruction::Load:
5052   case Instruction::Store: {
5053     if (!Legal->isMaskRequired(I))
5054       return false;
5055     auto *Ptr = getLoadStorePointerOperand(I);
5056     auto *Ty = getMemInstValueType(I);
5057     // We have already decided how to vectorize this instruction, get that
5058     // result.
5059     if (VF.isVector()) {
5060       InstWidening WideningDecision = getWideningDecision(I, VF);
5061       assert(WideningDecision != CM_Unknown &&
5062              "Widening decision should be ready at this moment");
5063       return WideningDecision == CM_Scalarize;
5064     }
5065     const Align Alignment = getLoadStoreAlignment(I);
5066     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5067                                 isLegalMaskedGather(Ty, Alignment))
5068                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5069                                 isLegalMaskedScatter(Ty, Alignment));
5070   }
5071   case Instruction::UDiv:
5072   case Instruction::SDiv:
5073   case Instruction::SRem:
5074   case Instruction::URem:
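    // A divide or remainder in a block that needs predication is scalarized
    // and predicated, e.g. a udiv guarded by an if, so that lanes whose
    // divisor may be zero never execute the division speculatively.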
5075     return mayDivideByZero(*I);
5076   }
5077   return false;
5078 }
5079 
5080 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5081     Instruction *I, ElementCount VF) {
5082   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5083   assert(getWideningDecision(I, VF) == CM_Unknown &&
5084          "Decision should not be set yet.");
5085   auto *Group = getInterleavedAccessGroup(I);
5086   assert(Group && "Must have a group.");
5087 
  // If the instruction's allocated size doesn't equal its type size, it
5089   // requires padding and will be scalarized.
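  // (For example, an x86_fp80 value occupies 80 bits but is typically
  // allocated with padding, so consecutive vector lanes would not be
  // contiguous in memory.)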
5090   auto &DL = I->getModule()->getDataLayout();
5091   auto *ScalarTy = getMemInstValueType(I);
5092   if (hasIrregularType(ScalarTy, DL, VF))
5093     return false;
5094 
5095   // Check if masking is required.
5096   // A Group may need masking for one of two reasons: it resides in a block that
5097   // needs predication, or it was decided to use masking to deal with gaps.
5098   bool PredicatedAccessRequiresMasking =
5099       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5100   bool AccessWithGapsRequiresMasking =
5101       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
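  // For example (illustrative only), a group that accesses only members 0 and
  // 2 of a 3-member interleave group has gaps; without a scalar epilogue, the
  // last iterations must be masked so the wide access does not touch memory
  // past the end of the underlying data.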
5102   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5103     return true;
5104 
5105   // If masked interleaving is required, we expect that the user/target had
5106   // enabled it, because otherwise it either wouldn't have been created or
5107   // it should have been invalidated by the CostModel.
5108   assert(useMaskedInterleavedAccesses(TTI) &&
5109          "Masked interleave-groups for predicated accesses are not enabled.");
5110 
5111   auto *Ty = getMemInstValueType(I);
5112   const Align Alignment = getLoadStoreAlignment(I);
5113   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5114                           : TTI.isLegalMaskedStore(Ty, Alignment);
5115 }
5116 
5117 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5118     Instruction *I, ElementCount VF) {
5119   // Get and ensure we have a valid memory instruction.
5120   LoadInst *LI = dyn_cast<LoadInst>(I);
5121   StoreInst *SI = dyn_cast<StoreInst>(I);
5122   assert((LI || SI) && "Invalid memory instruction");
5123 
5124   auto *Ptr = getLoadStorePointerOperand(I);
5125 
5126   // In order to be widened, the pointer should be consecutive, first of all.
5127   if (!Legal->isConsecutivePtr(Ptr))
5128     return false;
5129 
5130   // If the instruction is a store located in a predicated block, it will be
5131   // scalarized.
5132   if (isScalarWithPredication(I))
5133     return false;
5134 
  // If the instruction's allocated size doesn't equal its type size, it
5136   // requires padding and will be scalarized.
5137   auto &DL = I->getModule()->getDataLayout();
5138   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5139   if (hasIrregularType(ScalarTy, DL, VF))
5140     return false;
5141 
5142   return true;
5143 }
5144 
5145 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5146   // We should not collect Uniforms more than once per VF. Right now,
5147   // this function is called from collectUniformsAndScalars(), which
5148   // already does this check. Collecting Uniforms for VF=1 does not make any
5149   // sense.
5150 
5151   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5152          "This function should not be visited twice for the same VF");
5153 
  // Visit the list of Uniforms. If we find no uniform value, we won't
  // analyze the loop again for this VF; Uniforms.count(VF) will return 1.
5156   Uniforms[VF].clear();
5157 
5158   // We now know that the loop is vectorizable!
5159   // Collect instructions inside the loop that will remain uniform after
5160   // vectorization.
5161 
5162   // Global values, params and instructions outside of current loop are out of
5163   // scope.
5164   auto isOutOfScope = [&](Value *V) -> bool {
5165     Instruction *I = dyn_cast<Instruction>(V);
5166     return (!I || !TheLoop->contains(I));
5167   };
5168 
5169   SetVector<Instruction *> Worklist;
5170   BasicBlock *Latch = TheLoop->getLoopLatch();
5171 
5172   // Instructions that are scalar with predication must not be considered
5173   // uniform after vectorization, because that would create an erroneous
5174   // replicating region where only a single instance out of VF should be formed.
5175   // TODO: optimize such seldom cases if found important, see PR40816.
5176   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5177     if (isOutOfScope(I)) {
5178       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5179                         << *I << "\n");
5180       return;
5181     }
5182     if (isScalarWithPredication(I, VF)) {
5183       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5184                         << *I << "\n");
5185       return;
5186     }
5187     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5188     Worklist.insert(I);
5189   };
5190 
5191   // Start with the conditional branch. If the branch condition is an
5192   // instruction contained in the loop that is only used by the branch, it is
5193   // uniform.
5194   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5195   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5196     addToWorklistIfAllowed(Cmp);
5197 
5198   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5199     InstWidening WideningDecision = getWideningDecision(I, VF);
5200     assert(WideningDecision != CM_Unknown &&
5201            "Widening decision should be ready at this moment");
5202 
5203     // A uniform memory op is itself uniform.  We exclude uniform stores
5204     // here as they demand the last lane, not the first one.
5205     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5206       assert(WideningDecision == CM_Scalarize);
5207       return true;
5208     }
5209 
5210     return (WideningDecision == CM_Widen ||
5211             WideningDecision == CM_Widen_Reverse ||
5212             WideningDecision == CM_Interleave);
5213   };
5214 
5215 
5216   // Returns true if Ptr is the pointer operand of a memory access instruction
5217   // I, and I is known to not require scalarization.
5218   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5219     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5220   };
5221 
5222   // Holds a list of values which are known to have at least one uniform use.
5223   // Note that there may be other uses which aren't uniform.  A "uniform use"
5224   // here is something which only demands lane 0 of the unrolled iterations;
5225   // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
5227   SmallPtrSet<Value *, 8> HasUniformUse;
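  // For example (illustrative only), the address operand of a consecutive,
  // widened load is such a use: only the lane-0 address is needed, even
  // though each lane conceptually has a different address.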
5228 
5229   // Scan the loop for instructions which are either a) known to have only
5230   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5231   for (auto *BB : TheLoop->blocks())
5232     for (auto &I : *BB) {
5233       // If there's no pointer operand, there's nothing to do.
5234       auto *Ptr = getLoadStorePointerOperand(&I);
5235       if (!Ptr)
5236         continue;
5237 
5238       // A uniform memory op is itself uniform.  We exclude uniform stores
5239       // here as they demand the last lane, not the first one.
5240       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5241         addToWorklistIfAllowed(&I);
5242 
5243       if (isUniformDecision(&I, VF)) {
5244         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5245         HasUniformUse.insert(Ptr);
5246       }
5247     }
5248 
5249   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5250   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5251   // disallows uses outside the loop as well.
5252   for (auto *V : HasUniformUse) {
5253     if (isOutOfScope(V))
5254       continue;
5255     auto *I = cast<Instruction>(V);
5256     auto UsersAreMemAccesses =
5257       llvm::all_of(I->users(), [&](User *U) -> bool {
5258         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5259       });
5260     if (UsersAreMemAccesses)
5261       addToWorklistIfAllowed(I);
5262   }
5263 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
5267   unsigned idx = 0;
5268   while (idx != Worklist.size()) {
5269     Instruction *I = Worklist[idx++];
5270 
5271     for (auto OV : I->operand_values()) {
5272       // isOutOfScope operands cannot be uniform instructions.
5273       if (isOutOfScope(OV))
5274         continue;
5275       // First order recurrence Phi's should typically be considered
5276       // non-uniform.
5277       auto *OP = dyn_cast<PHINode>(OV);
5278       if (OP && Legal->isFirstOrderRecurrence(OP))
5279         continue;
5280       // If all the users of the operand are uniform, then add the
5281       // operand into the uniform worklist.
5282       auto *OI = cast<Instruction>(OV);
5283       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5284             auto *J = cast<Instruction>(U);
5285             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5286           }))
5287         addToWorklistIfAllowed(OI);
5288     }
5289   }
5290 
5291   // For an instruction to be added into Worklist above, all its users inside
5292   // the loop should also be in Worklist. However, this condition cannot be
5293   // true for phi nodes that form a cyclic dependence. We must process phi
5294   // nodes separately. An induction variable will remain uniform if all users
5295   // of the induction variable and induction variable update remain uniform.
5296   // The code below handles both pointer and non-pointer induction variables.
5297   for (auto &Induction : Legal->getInductionVars()) {
5298     auto *Ind = Induction.first;
5299     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5300 
5301     // Determine if all users of the induction variable are uniform after
5302     // vectorization.
5303     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5304       auto *I = cast<Instruction>(U);
5305       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5306              isVectorizedMemAccessUse(I, Ind);
5307     });
5308     if (!UniformInd)
5309       continue;
5310 
5311     // Determine if all users of the induction variable update instruction are
5312     // uniform after vectorization.
5313     auto UniformIndUpdate =
5314         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5315           auto *I = cast<Instruction>(U);
5316           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5317                  isVectorizedMemAccessUse(I, IndUpdate);
5318         });
5319     if (!UniformIndUpdate)
5320       continue;
5321 
5322     // The induction variable and its update instruction will remain uniform.
5323     addToWorklistIfAllowed(Ind);
5324     addToWorklistIfAllowed(IndUpdate);
5325   }
5326 
5327   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5328 }
5329 
5330 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5331   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5332 
5333   if (Legal->getRuntimePointerChecking()->Need) {
5334     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5335         "runtime pointer checks needed. Enable vectorization of this "
5336         "loop with '#pragma clang loop vectorize(enable)' when "
5337         "compiling with -Os/-Oz",
5338         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5339     return true;
5340   }
5341 
5342   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5343     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5344         "runtime SCEV checks needed. Enable vectorization of this "
5345         "loop with '#pragma clang loop vectorize(enable)' when "
5346         "compiling with -Os/-Oz",
5347         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5348     return true;
5349   }
5350 
5351   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5352   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5353     reportVectorizationFailure("Runtime stride check for small trip count",
5354         "runtime stride == 1 checks needed. Enable vectorization of "
5355         "this loop without such check by compiling with -Os/-Oz",
5356         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5357     return true;
5358   }
5359 
5360   return false;
5361 }
5362 
5363 Optional<ElementCount>
5364 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5365   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
5368     reportVectorizationFailure(
5369         "Not inserting runtime ptr check for divergent target",
5370         "runtime pointer checks needed. Not enabled for divergent target",
5371         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5372     return None;
5373   }
5374 
5375   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5376   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5377   if (TC == 1) {
5378     reportVectorizationFailure("Single iteration (non) loop",
5379         "loop trip count is one, irrelevant for vectorization",
5380         "SingleIterationLoop", ORE, TheLoop);
5381     return None;
5382   }
5383 
5384   switch (ScalarEpilogueStatus) {
5385   case CM_ScalarEpilogueAllowed:
5386     return computeFeasibleMaxVF(TC, UserVF);
5387   case CM_ScalarEpilogueNotAllowedUsePredicate:
5388     LLVM_FALLTHROUGH;
5389   case CM_ScalarEpilogueNotNeededUsePredicate:
5390     LLVM_DEBUG(
5391         dbgs() << "LV: vector predicate hint/switch found.\n"
5392                << "LV: Not allowing scalar epilogue, creating predicated "
5393                << "vector loop.\n");
5394     break;
5395   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5396     // fallthrough as a special case of OptForSize
5397   case CM_ScalarEpilogueNotAllowedOptSize:
5398     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5399       LLVM_DEBUG(
5400           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5401     else
5402       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5403                         << "count.\n");
5404 
5405     // Bail if runtime checks are required, which are not good when optimising
5406     // for size.
5407     if (runtimeChecksRequired())
5408       return None;
5409 
5410     break;
5411   }
5412 
5413   // The only loops we can vectorize without a scalar epilogue, are loops with
5414   // a bottom-test and a single exiting block. We'd have to handle the fact
5415   // that not every instruction executes on the last iteration.  This will
5416   // require a lane mask which varies through the vector loop body.  (TODO)
5417   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5418     // If there was a tail-folding hint/switch, but we can't fold the tail by
5419     // masking, fallback to a vectorization with a scalar epilogue.
5420     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5421       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5422                            "scalar epilogue instead.\n");
5423       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5424       return computeFeasibleMaxVF(TC, UserVF);
5425     }
5426     return None;
5427   }
5428 
  // Now try folding the tail by masking.
5430 
5431   // Invalidate interleave groups that require an epilogue if we can't mask
5432   // the interleave-group.
5433   if (!useMaskedInterleavedAccesses(TTI)) {
5434     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5435            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5438     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5439   }
5440 
5441   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5442   assert(!MaxVF.isScalable() &&
5443          "Scalable vectors do not yet support tail folding");
5444   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5445          "MaxVF must be a power of 2");
5446   unsigned MaxVFtimesIC =
5447       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
5450   ScalarEvolution *SE = PSE.getSE();
5451   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5452   const SCEV *ExitCount = SE->getAddExpr(
5453       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5454   const SCEV *Rem = SE->getURemExpr(
5455       SE->applyLoopGuards(ExitCount, TheLoop),
5456       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
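  // For example (illustrative only), a loop with a known trip count of 64 and
  // MaxVFtimesIC == 8 gives a remainder of 0, so no tail remains and tail
  // folding is unnecessary.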
5457   if (Rem->isZero()) {
5458     // Accept MaxVF if we do not have a tail.
5459     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5460     return MaxVF;
5461   }
5462 
5463   // If we don't know the precise trip count, or if the trip count that we
5464   // found modulo the vectorization factor is not zero, try to fold the tail
5465   // by masking.
5466   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5467   if (Legal->prepareToFoldTailByMasking()) {
5468     FoldTailByMasking = true;
5469     return MaxVF;
5470   }
5471 
5472   // If there was a tail-folding hint/switch, but we can't fold the tail by
5473   // masking, fallback to a vectorization with a scalar epilogue.
5474   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5475     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5476                          "scalar epilogue instead.\n");
5477     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5478     return MaxVF;
5479   }
5480 
5481   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5482     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5483     return None;
5484   }
5485 
5486   if (TC == 0) {
5487     reportVectorizationFailure(
5488         "Unable to calculate the loop count due to complex control flow",
5489         "unable to calculate the loop count due to complex control flow",
5490         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5491     return None;
5492   }
5493 
5494   reportVectorizationFailure(
5495       "Cannot optimize for size and vectorize at the same time.",
5496       "cannot optimize for size and vectorize at the same time. "
5497       "Enable vectorization of this loop with '#pragma clang loop "
5498       "vectorize(enable)' when compiling with -Os/-Oz",
5499       "NoTailLoopWithOptForSize", ORE, TheLoop);
5500   return None;
5501 }
5502 
5503 ElementCount
5504 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5505                                                  ElementCount UserVF) {
5506   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5507                               !TTI.supportsScalableVectors() &&
5508                               !ForceTargetSupportsScalableVectors;
5509   if (IgnoreScalableUserVF) {
5510     LLVM_DEBUG(
5511         dbgs() << "LV: Ignoring VF=" << UserVF
5512                << " because target does not support scalable vectors.\n");
5513     ORE->emit([&]() {
5514       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5515                                         TheLoop->getStartLoc(),
5516                                         TheLoop->getHeader())
5517              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5518              << " because target does not support scalable vectors.";
5519     });
5520   }
5521 
5522   // Beyond this point two scenarios are handled. If UserVF isn't specified
5523   // then a suitable VF is chosen. If UserVF is specified and there are
5524   // dependencies, check if it's legal. However, if a UserVF is specified and
5525   // there are no dependencies, then there's nothing to do.
5526   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5527     if (!canVectorizeReductions(UserVF)) {
5528       reportVectorizationFailure(
5529           "LV: Scalable vectorization not supported for the reduction "
5530           "operations found in this loop. Using fixed-width "
5531           "vectorization instead.",
5532           "Scalable vectorization not supported for the reduction operations "
5533           "found in this loop. Using fixed-width vectorization instead.",
5534           "ScalableVFUnfeasible", ORE, TheLoop);
5535       return computeFeasibleMaxVF(
5536           ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5537     }
5538 
5539     if (Legal->isSafeForAnyVectorWidth())
5540       return UserVF;
5541   }
5542 
5543   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5544   unsigned SmallestType, WidestType;
5545   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5546   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5547 
5548   // Get the maximum safe dependence distance in bits computed by LAA.
5549   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5550   // the memory accesses that is most restrictive (involved in the smallest
5551   // dependence distance).
5552   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5553 
5554   // If the user vectorization factor is legally unsafe, clamp it to a safe
5555   // value. Otherwise, return as is.
5556   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5557     unsigned MaxSafeElements =
5558         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5559     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5560 
5561     if (UserVF.isScalable()) {
5562       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5563 
5564       // Scale VF by vscale before checking if it's safe.
5565       MaxSafeVF = ElementCount::getScalable(
5566           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5567 
5568       if (MaxSafeVF.isZero()) {
5569         // The dependence distance is too small to use scalable vectors,
5570         // fallback on fixed.
5571         LLVM_DEBUG(
5572             dbgs()
5573             << "LV: Max legal vector width too small, scalable vectorization "
5574                "unfeasible. Using fixed-width vectorization instead.\n");
5575         ORE->emit([&]() {
5576           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5577                                             TheLoop->getStartLoc(),
5578                                             TheLoop->getHeader())
5579                  << "Max legal vector width too small, scalable vectorization "
5580                  << "unfeasible. Using fixed-width vectorization instead.";
5581         });
5582         return computeFeasibleMaxVF(
5583             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5584       }
5585     }
5586 
5587     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5588 
5589     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5590       return UserVF;
5591 
5592     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5593                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5594                       << ".\n");
5595     ORE->emit([&]() {
5596       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5597                                         TheLoop->getStartLoc(),
5598                                         TheLoop->getHeader())
5599              << "User-specified vectorization factor "
5600              << ore::NV("UserVectorizationFactor", UserVF)
5601              << " is unsafe, clamping to maximum safe vectorization factor "
5602              << ore::NV("VectorizationFactor", MaxSafeVF);
5603     });
5604     return MaxSafeVF;
5605   }
5606 
5607   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5608 
5609   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5611   auto MaxVectorSize =
5612       ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType));
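  // For example (illustrative only), with a 256-bit widest register and a
  // widest element type of 32 bits, MaxVectorSize is
  // PowerOf2Floor(256 / 32) = 8 elements.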
5613 
5614   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5615                     << " / " << WidestType << " bits.\n");
5616   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5617                     << WidestRegister << " bits.\n");
5618 
5619   assert(MaxVectorSize.getFixedValue() <= WidestRegister &&
5620          "Did not expect to pack so many elements"
5621          " into one vector!");
5622   if (MaxVectorSize.getFixedValue() == 0) {
5623     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5624     return ElementCount::getFixed(1);
5625   } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() &&
5626              isPowerOf2_32(ConstTripCount)) {
5627     // We need to clamp the VF to be the ConstTripCount. There is no point in
5628     // choosing a higher viable VF as done in the loop below.
5629     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5630                       << ConstTripCount << "\n");
5631     return ElementCount::getFixed(ConstTripCount);
5632   }
5633 
5634   ElementCount MaxVF = MaxVectorSize;
5635   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5636       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5637     // Collect all viable vectorization factors larger than the default MaxVF
5638     // (i.e. MaxVectorSize).
5639     SmallVector<ElementCount, 8> VFs;
5640     auto MaxVectorSizeMaxBW =
5641         ElementCount::getFixed(WidestRegister / SmallestType);
5642     for (ElementCount VS = MaxVectorSize * 2;
5643          ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2)
5644       VFs.push_back(VS);
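    // For example (illustrative only), with MaxVectorSize = 8, a 256-bit
    // register and a smallest type of 8 bits, the candidate VFs collected
    // here are 16 and 32.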
5645 
5646     // For each VF calculate its register usage.
5647     auto RUs = calculateRegisterUsage(VFs);
5648 
5649     // Select the largest VF which doesn't require more registers than existing
5650     // ones.
5651     for (int i = RUs.size() - 1; i >= 0; --i) {
5652       bool Selected = true;
5653       for (auto &pair : RUs[i].MaxLocalUsers) {
5654         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5655         if (pair.second > TargetNumRegisters)
5656           Selected = false;
5657       }
5658       if (Selected) {
5659         MaxVF = VFs[i];
5660         break;
5661       }
5662     }
5663     if (ElementCount MinVF =
5664             TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) {
5665       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5666         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5667                           << ") with target's minimum: " << MinVF << '\n');
5668         MaxVF = MinVF;
5669       }
5670     }
5671   }
5672   return MaxVF;
5673 }
5674 
5675 VectorizationFactor
5676 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5677   // FIXME: This can be fixed for scalable vectors later, because at this stage
5678   // the LoopVectorizer will only consider vectorizing a loop with scalable
5679   // vectors when the loop has a hint to enable vectorization for a given VF.
5680   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5681 
5682   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5683   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5684   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5685 
5686   auto Width = ElementCount::getFixed(1);
5687   const float ScalarCost = *ExpectedCost.getValue();
5688   float Cost = ScalarCost;
5689 
5690   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5691   if (ForceVectorization && MaxVF.isVector()) {
5692     // Ignore scalar width, because the user explicitly wants vectorization.
5693     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5694     // evaluation.
5695     Cost = std::numeric_limits<float>::max();
5696   }
5697 
5698   for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
5699        i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
5703     VectorizationCostTy C = expectedCost(i);
5704     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5705     float VectorCost = *C.first.getValue() / (float)i.getFixedValue();
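    // For example (illustrative only), if the scalar loop costs 8 and the
    // VF=4 body costs 20, the per-lane vector cost is 20 / 4 = 5, which is
    // cheaper than the scalar cost of 8.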
5706     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5707                       << " costs: " << (int)VectorCost << ".\n");
5708     if (!C.second && !ForceVectorization) {
5709       LLVM_DEBUG(
5710           dbgs() << "LV: Not considering vector loop of width " << i
5711                  << " because it will not generate any vector instructions.\n");
5712       continue;
5713     }
5714 
5715     // If profitable add it to ProfitableVF list.
5716     if (VectorCost < ScalarCost) {
5717       ProfitableVFs.push_back(VectorizationFactor(
5718           {i, (unsigned)VectorCost}));
5719     }
5720 
5721     if (VectorCost < Cost) {
5722       Cost = VectorCost;
5723       Width = i;
5724     }
5725   }
5726 
5727   if (!EnableCondStoresVectorization && NumPredStores) {
5728     reportVectorizationFailure("There are conditional stores.",
5729         "store that is conditionally executed prevents vectorization",
5730         "ConditionalStore", ORE, TheLoop);
5731     Width = ElementCount::getFixed(1);
5732     Cost = ScalarCost;
5733   }
5734 
5735   LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() && Cost >= ScalarCost) dbgs()
5736              << "LV: Vectorization seems to be not beneficial, "
5737              << "but was forced by a user.\n");
5738   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5739   VectorizationFactor Factor = {Width,
5740                                 (unsigned)(Width.getKnownMinValue() * Cost)};
5741   return Factor;
5742 }
5743 
5744 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5745     const Loop &L, ElementCount VF) const {
5746   // Cross iteration phis such as reductions need special handling and are
5747   // currently unsupported.
5748   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5749         return Legal->isFirstOrderRecurrence(&Phi) ||
5750                Legal->isReductionVariable(&Phi);
5751       }))
5752     return false;
5753 
5754   // Phis with uses outside of the loop require special handling and are
5755   // currently unsupported.
5756   for (auto &Entry : Legal->getInductionVars()) {
5757     // Look for uses of the value of the induction at the last iteration.
5758     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5759     for (User *U : PostInc->users())
5760       if (!L.contains(cast<Instruction>(U)))
5761         return false;
5762     // Look for uses of penultimate value of the induction.
5763     for (User *U : Entry.first->users())
5764       if (!L.contains(cast<Instruction>(U)))
5765         return false;
5766   }
5767 
5768   // Induction variables that are widened require special handling that is
5769   // currently not supported.
5770   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5771         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5772                  this->isProfitableToScalarize(Entry.first, VF));
5773       }))
5774     return false;
5775 
5776   return true;
5777 }
5778 
5779 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5780     const ElementCount VF) const {
5781   // FIXME: We need a much better cost-model to take different parameters such
5782   // as register pressure, code size increase and cost of extra branches into
5783   // account. For now we apply a very crude heuristic and only consider loops
5784   // with vectorization factors larger than a certain value.
5785   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
5787   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5788     return false;
5789   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5790     return true;
5791   return false;
5792 }
5793 
5794 VectorizationFactor
5795 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5796     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5797   VectorizationFactor Result = VectorizationFactor::Disabled();
5798   if (!EnableEpilogueVectorization) {
5799     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5800     return Result;
5801   }
5802 
5803   if (!isScalarEpilogueAllowed()) {
5804     LLVM_DEBUG(
5805         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5806                   "allowed.\n";);
5807     return Result;
5808   }
5809 
5810   // FIXME: This can be fixed for scalable vectors later, because at this stage
5811   // the LoopVectorizer will only consider vectorizing a loop with scalable
5812   // vectors when the loop has a hint to enable vectorization for a given VF.
5813   if (MainLoopVF.isScalable()) {
5814     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5815                          "yet supported.\n");
5816     return Result;
5817   }
5818 
5819   // Not really a cost consideration, but check for unsupported cases here to
5820   // simplify the logic.
5821   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5822     LLVM_DEBUG(
5823         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5824                   "not a supported candidate.\n";);
5825     return Result;
5826   }
5827 
5828   if (EpilogueVectorizationForceVF > 1) {
5829     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5830     if (LVP.hasPlanWithVFs(
5831             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
5832       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
5833     else {
5834       LLVM_DEBUG(
5835           dbgs()
5836               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5837       return Result;
5838     }
5839   }
5840 
5841   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5842       TheLoop->getHeader()->getParent()->hasMinSize()) {
5843     LLVM_DEBUG(
5844         dbgs()
5845             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5846     return Result;
5847   }
5848 
5849   if (!isEpilogueVectorizationProfitable(MainLoopVF))
5850     return Result;
5851 
5852   for (auto &NextVF : ProfitableVFs)
5853     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
5854         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
5855         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
5856       Result = NextVF;
5857 
5858   if (Result != VectorizationFactor::Disabled())
5859     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5860                       << Result.Width.getFixedValue() << "\n";);
5861   return Result;
5862 }
5863 
5864 std::pair<unsigned, unsigned>
5865 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5866   unsigned MinWidth = -1U;
5867   unsigned MaxWidth = 8;
5868   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5869 
5870   // For each block.
5871   for (BasicBlock *BB : TheLoop->blocks()) {
5872     // For each instruction in the loop.
5873     for (Instruction &I : BB->instructionsWithoutDebug()) {
5874       Type *T = I.getType();
5875 
5876       // Skip ignored values.
5877       if (ValuesToIgnore.count(&I))
5878         continue;
5879 
5880       // Only examine Loads, Stores and PHINodes.
5881       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5882         continue;
5883 
5884       // Examine PHI nodes that are reduction variables. Update the type to
5885       // account for the recurrence type.
5886       if (auto *PN = dyn_cast<PHINode>(&I)) {
5887         if (!Legal->isReductionVariable(PN))
5888           continue;
5889         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5890         if (PreferInLoopReductions ||
5891             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5892                                       RdxDesc.getRecurrenceType(),
5893                                       TargetTransformInfo::ReductionFlags()))
5894           continue;
5895         T = RdxDesc.getRecurrenceType();
5896       }
5897 
5898       // Examine the stored values.
5899       if (auto *ST = dyn_cast<StoreInst>(&I))
5900         T = ST->getValueOperand()->getType();
5901 
5902       // Ignore loaded pointer types and stored pointer types that are not
5903       // vectorizable.
5904       //
5905       // FIXME: The check here attempts to predict whether a load or store will
5906       //        be vectorized. We only know this for certain after a VF has
5907       //        been selected. Here, we assume that if an access can be
5908       //        vectorized, it will be. We should also look at extending this
5909       //        optimization to non-pointer types.
5910       //
5911       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5912           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5913         continue;
5914 
5915       MinWidth = std::min(MinWidth,
5916                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5917       MaxWidth = std::max(MaxWidth,
5918                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5919     }
5920   }
5921 
5922   return {MinWidth, MaxWidth};
5923 }
5924 
5925 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5926                                                            unsigned LoopCost) {
5927   // -- The interleave heuristics --
5928   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5929   // There are many micro-architectural considerations that we can't predict
5930   // at this level. For example, frontend pressure (on decode or fetch) due to
5931   // code size, or the number and capabilities of the execution ports.
5932   //
5933   // We use the following heuristics to select the interleave count:
5934   // 1. If the code has reductions, then we interleave to break the cross
5935   // iteration dependency.
5936   // 2. If the loop is really small, then we interleave to reduce the loop
5937   // overhead.
5938   // 3. We don't interleave if we think that we will spill registers to memory
5939   // due to the increased register pressure.
5940 
5941   if (!isScalarEpilogueAllowed())
5942     return 1;
5943 
  // We already used the maximum safe dependence distance to limit the
  // vectorization factor; do not interleave in that case.
5945   if (Legal->getMaxSafeDepDistBytes() != -1U)
5946     return 1;
5947 
5948   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5949   const bool HasReductions = !Legal->getReductionVars().empty();
5950   // Do not interleave loops with a relatively small known or estimated trip
5951   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
5953   // because with the above conditions interleaving can expose ILP and break
5954   // cross iteration dependences for reductions.
5955   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5956       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5957     return 1;
5958 
5959   RegisterUsage R = calculateRegisterUsage({VF})[0];
5960   // We divide by these constants so assume that we have at least one
5961   // instruction that uses at least one register.
5962   for (auto& pair : R.MaxLocalUsers) {
5963     pair.second = std::max(pair.second, 1U);
5964   }
5965 
5966   // We calculate the interleave count using the following formula.
5967   // Subtract the number of loop invariants from the number of available
5968   // registers. These registers are used by all of the interleaved instances.
5969   // Next, divide the remaining registers by the number of registers that is
5970   // required by the loop, in order to estimate how many parallel instances
5971   // fit without causing spills. All of this is rounded down if necessary to be
5972   // a power of two. We want power of two interleave count to simplify any
5973   // addressing operations or alignment considerations.
5974   // We also want power of two interleave counts to ensure that the induction
5975   // variable of the vector loop wraps to zero, when tail is folded by masking;
5976   // this currently happens when OptForSize, in which case IC is set to 1 above.
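  // For example (illustrative only), a hypothetical target with 16 registers
  // in a class, 2 of them holding loop-invariant values, and a loop needing 3
  // registers per instance gives IC = PowerOf2Floor((16 - 2) / 3) = 4 (before
  // the induction-variable adjustment below).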
5977   unsigned IC = UINT_MAX;
5978 
5979   for (auto& pair : R.MaxLocalUsers) {
5980     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5981     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5982                       << " registers of "
5983                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5984     if (VF.isScalar()) {
5985       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5986         TargetNumRegisters = ForceTargetNumScalarRegs;
5987     } else {
5988       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5989         TargetNumRegisters = ForceTargetNumVectorRegs;
5990     }
5991     unsigned MaxLocalUsers = pair.second;
5992     unsigned LoopInvariantRegs = 0;
5993     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5994       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5995 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
5997     // Don't count the induction variable as interleaved.
5998     if (EnableIndVarRegisterHeur) {
5999       TmpIC =
6000           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6001                         std::max(1U, (MaxLocalUsers - 1)));
6002     }
6003 
6004     IC = std::min(IC, TmpIC);
6005   }
6006 
6007   // Clamp the interleave ranges to reasonable counts.
6008   unsigned MaxInterleaveCount =
6009       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6010 
6011   // Check if the user has overridden the max.
6012   if (VF.isScalar()) {
6013     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6014       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6015   } else {
6016     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6017       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6018   }
6019 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, making sure the
  // result is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the interleave count as if vscale were '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
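  // For example (illustrative): with an estimated trip count of 16 and a VF of
  // 4, the trip count caps the maximum interleave count at 16 / 4 = 4 (unless
  // the target maximum is already smaller).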
6030   if (BestKnownTC) {
6031     MaxInterleaveCount =
6032         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6033     // Make sure MaxInterleaveCount is greater than 0.
6034     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6035   }
6036 
6037   assert(MaxInterleaveCount > 0 &&
6038          "Maximum interleave count must be greater than 0");
6039 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6042   if (IC > MaxInterleaveCount)
6043     IC = MaxInterleaveCount;
6044   else
6045     // Make sure IC is greater than 0.
6046     IC = std::max(1u, IC);
6047 
6048   assert(IC > 0 && "Interleave count must be greater than 0.");
6049 
6050   // If we did not calculate the cost for VF (because the user selected the VF)
6051   // then we calculate the cost of VF here.
6052   if (LoopCost == 0) {
6053     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6054     LoopCost = *expectedCost(VF).first.getValue();
6055   }
6056 
6057   assert(LoopCost && "Non-zero loop cost expected");
6058 
6059   // Interleave if we vectorized this loop and there is a reduction that could
6060   // benefit from interleaving.
6061   if (VF.isVector() && HasReductions) {
6062     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6063     return IC;
6064   }
6065 
6066   // Note that if we've already vectorized the loop we will have done the
6067   // runtime check and so interleaving won't require further checks.
6068   bool InterleavingRequiresRuntimePointerCheck =
6069       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6070 
6071   // We want to interleave small loops in order to reduce the loop overhead and
6072   // potentially expose ILP opportunities.
6073   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6074                     << "LV: IC is " << IC << '\n'
6075                     << "LV: VF is " << VF << '\n');
6076   const bool AggressivelyInterleaveReductions =
6077       TTI.enableAggressiveInterleaving(HasReductions);
6078   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead has a cost of 1, and we use the cost
    // model's estimate of the loop cost; we interleave until the loop overhead
    // is about 5% of the total cost of the loop.
6082     unsigned SmallIC =
6083         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
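    // For example (illustrative, assuming the default SmallLoopCost of 20):
    // with LoopCost == 6 this yields min(IC, PowerOf2Floor(20 / 6)) =
    // min(IC, 2).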
6084 
6085     // Interleave until store/load ports (estimated by max interleave count) are
6086     // saturated.
6087     unsigned NumStores = Legal->getNumStores();
6088     unsigned NumLoads = Legal->getNumLoads();
6089     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6090     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6091 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), interleaving can increase the critical path length if
    // the loop we're interleaving is inside another loop. Limit the count (by
    // default to 2) so that the critical path only gets increased by one
    // reduction operation.
6096     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6097       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6098       SmallIC = std::min(SmallIC, F);
6099       StoresIC = std::min(StoresIC, F);
6100       LoadsIC = std::min(LoadsIC, F);
6101     }
6102 
6103     if (EnableLoadStoreRuntimeInterleave &&
6104         std::max(StoresIC, LoadsIC) > SmallIC) {
6105       LLVM_DEBUG(
6106           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6107       return std::max(StoresIC, LoadsIC);
6108     }
6109 
6110     // If there are scalar reductions and TTI has enabled aggressive
6111     // interleaving for reductions, we will interleave to expose ILP.
6112     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6113         AggressivelyInterleaveReductions) {
6114       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave by no less than SmallIC but not as aggressively as the
      // normal IC, to cope with the rare situation where resources are too
      // limited.
6117       return std::max(IC / 2, SmallIC);
6118     } else {
6119       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6120       return SmallIC;
6121     }
6122   }
6123 
6124   // Interleave if this is a large loop (small loops are already dealt with by
6125   // this point) that could benefit from interleaving.
6126   if (AggressivelyInterleaveReductions) {
6127     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6128     return IC;
6129   }
6130 
6131   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6132   return 1;
6133 }
6134 
6135 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6136 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in a topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop users
  // starts an interval. We record every time that an in-loop value is used, so
  // we have a list of the first and last occurrences of each instruction.
  // Next, we transpose this data structure into a multi-map that holds the
  // list of intervals that *end* at a specific location. This multi-map allows
  // us to perform a linear search. We scan the instructions linearly and
  // record each time that a new interval starts, by placing it in a set. If we
  // find this value in the multi-map then we remove it from the set. The max
  // register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not take
  // more registers.
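  // For example (illustrative): in a straight-line body such as
  //   %a = ...
  //   %b = add %a, 1
  //   %c = mul %a, %b
  //   store %c, ...
  // both %a and %b are still open when %c is visited, so the maximum local
  // usage recorded here is 2 values (per register class, and scaled by the
  // register usage of the vectorized type for VF > 1).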
6154   LoopBlocksDFS DFS(TheLoop);
6155   DFS.perform(LI);
6156 
6157   RegisterUsage RU;
6158 
6159   // Each 'key' in the map opens a new interval. The values
6160   // of the map are the index of the 'last seen' usage of the
6161   // instruction that is the key.
6162   using IntervalMap = DenseMap<Instruction *, unsigned>;
6163 
6164   // Maps instruction to its index.
6165   SmallVector<Instruction *, 64> IdxToInstr;
6166   // Marks the end of each interval.
6167   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6169   SmallPtrSet<Instruction *, 8> Ends;
6170   // Saves the list of values that are used in the loop but are
6171   // defined outside the loop, such as arguments and constants.
6172   SmallPtrSet<Value *, 8> LoopInvariants;
6173 
6174   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6175     for (Instruction &I : BB->instructionsWithoutDebug()) {
6176       IdxToInstr.push_back(&I);
6177 
6178       // Save the end location of each USE.
6179       for (Value *U : I.operands()) {
6180         auto *Instr = dyn_cast<Instruction>(U);
6181 
6182         // Ignore non-instruction values such as arguments, constants, etc.
6183         if (!Instr)
6184           continue;
6185 
6186         // If this instruction is outside the loop then record it and continue.
6187         if (!TheLoop->contains(Instr)) {
6188           LoopInvariants.insert(Instr);
6189           continue;
6190         }
6191 
6192         // Overwrite previous end points.
6193         EndPoint[Instr] = IdxToInstr.size();
6194         Ends.insert(Instr);
6195       }
6196     }
6197   }
6198 
6199   // Saves the list of intervals that end with the index in 'key'.
6200   using InstrList = SmallVector<Instruction *, 2>;
6201   DenseMap<unsigned, InstrList> TransposeEnds;
6202 
6203   // Transpose the EndPoints to a list of values that end at each index.
6204   for (auto &Interval : EndPoint)
6205     TransposeEnds[Interval.second].push_back(Interval.first);
6206 
6207   SmallPtrSet<Instruction *, 8> OpenIntervals;
6208   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6209   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6210 
6211   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6212 
6213   // A lambda that gets the register usage for the given type and VF.
6214   const auto &TTICapture = TTI;
6215   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6216     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6217       return 0U;
6218     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6219   };
6220 
6221   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6222     Instruction *I = IdxToInstr[i];
6223 
6224     // Remove all of the instructions that end at this location.
6225     InstrList &List = TransposeEnds[i];
6226     for (Instruction *ToRemove : List)
6227       OpenIntervals.erase(ToRemove);
6228 
6229     // Ignore instructions that are never used within the loop.
6230     if (!Ends.count(I))
6231       continue;
6232 
6233     // Skip ignored values.
6234     if (ValuesToIgnore.count(I))
6235       continue;
6236 
6237     // For each VF find the maximum usage of registers.
6238     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6239       // Count the number of live intervals.
6240       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6241 
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // SmallMapVector entries are value-initialized to 0, so a plain +=
          // is equivalent to the insert-or-increment pattern.
          RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }
6271 
      for (auto &pair : RegUsage) {
        auto &Entry = MaxUsages[j][pair.first];
        Entry = std::max(Entry, pair.second);
      }
6278     }
6279 
6280     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6281                       << OpenIntervals.size() << '\n');
6282 
6283     // Add the current instruction to the list of open intervals.
6284     OpenIntervals.insert(I);
6285   }
6286 
6287   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6288     SmallMapVector<unsigned, unsigned, 4> Invariant;
6289 
6290     for (auto Inst : LoopInvariants) {
6291       unsigned Usage =
6292           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6293       unsigned ClassID =
6294           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      // SmallMapVector entries are value-initialized to 0, so a plain +=
      // covers both the first and subsequent insertions.
      Invariant[ClassID] += Usage;
6299     }
6300 
6301     LLVM_DEBUG({
6302       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6303       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6304              << " item\n";
6305       for (const auto &pair : MaxUsages[i]) {
6306         dbgs() << "LV(REG): RegisterClass: "
6307                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6308                << " registers\n";
6309       }
6310       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6311              << " item\n";
6312       for (const auto &pair : Invariant) {
6313         dbgs() << "LV(REG): RegisterClass: "
6314                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6315                << " registers\n";
6316       }
6317     });
6318 
6319     RU.LoopInvariantRegs = Invariant;
6320     RU.MaxLocalUsers = MaxUsages[i];
6321     RUs[i] = RU;
6322   }
6323 
6324   return RUs;
6325 }
6326 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6328   // TODO: Cost model for emulated masked load/store is completely
6329   // broken. This hack guides the cost model to use an artificially
6330   // high enough value to practically disable vectorization with such
6331   // operations, except where previously deployed legality hack allowed
6332   // using very low cost values. This is to avoid regressions coming simply
6333   // from moving "masked load/store" check from legality to cost model.
  // Masked load/gather emulation was previously never allowed.
  // Emulation of masked stores/scatters was allowed only for a limited number
  // of predicated stores.
6336   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6337   return isa<LoadInst>(I) ||
6338          (isa<StoreInst>(I) &&
6339           NumPredStores > NumberOfStoresToPredicate);
6340 }
6341 
6342 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6343   // If we aren't vectorizing the loop, or if we've already collected the
6344   // instructions to scalarize, there's nothing to do. Collection may already
6345   // have occurred if we have a user-selected VF and are now computing the
6346   // expected cost for interleaving.
6347   if (VF.isScalar() || VF.isZero() ||
6348       InstsToScalarize.find(VF) != InstsToScalarize.end())
6349     return;
6350 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6352   // not profitable to scalarize any instructions, the presence of VF in the
6353   // map will indicate that we've analyzed it already.
6354   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6355 
6356   // Find all the instructions that are scalar with predication in the loop and
6357   // determine if it would be better to not if-convert the blocks they are in.
6358   // If so, we also record the instructions to scalarize.
6359   for (BasicBlock *BB : TheLoop->blocks()) {
6360     if (!blockNeedsPredication(BB))
6361       continue;
6362     for (Instruction &I : *BB)
6363       if (isScalarWithPredication(&I)) {
6364         ScalarCostsTy ScalarCosts;
6365         // Do not apply discount logic if hacked cost is needed
6366         // for emulated masked memrefs.
6367         if (!useEmulatedMaskMemRefHack(&I) &&
6368             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6369           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6370         // Remember that BB will remain after vectorization.
6371         PredicatedBBsAfterVectorization.insert(BB);
6372       }
6373   }
6374 }
6375 
6376 int LoopVectorizationCostModel::computePredInstDiscount(
6377     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6378   assert(!isUniformAfterVectorization(PredInst, VF) &&
6379          "Instruction marked uniform-after-vectorization will be predicated");
6380 
6381   // Initialize the discount to zero, meaning that the scalar version and the
6382   // vector version cost the same.
6383   InstructionCost Discount = 0;
6384 
6385   // Holds instructions to analyze. The instructions we visit are mapped in
6386   // ScalarCosts. Those instructions are the ones that would be scalarized if
6387   // we find that the scalar version costs less.
6388   SmallVector<Instruction *, 8> Worklist;
6389 
6390   // Returns true if the given instruction can be scalarized.
6391   auto canBeScalarized = [&](Instruction *I) -> bool {
6392     // We only attempt to scalarize instructions forming a single-use chain
6393     // from the original predicated block that would otherwise be vectorized.
6394     // Although not strictly necessary, we give up on instructions we know will
6395     // already be scalar to avoid traversing chains that are unlikely to be
6396     // beneficial.
6397     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6398         isScalarAfterVectorization(I, VF))
6399       return false;
6400 
6401     // If the instruction is scalar with predication, it will be analyzed
6402     // separately. We ignore it within the context of PredInst.
6403     if (isScalarWithPredication(I))
6404       return false;
6405 
6406     // If any of the instruction's operands are uniform after vectorization,
6407     // the instruction cannot be scalarized. This prevents, for example, a
6408     // masked load from being scalarized.
6409     //
6410     // We assume we will only emit a value for lane zero of an instruction
6411     // marked uniform after vectorization, rather than VF identical values.
6412     // Thus, if we scalarize an instruction that uses a uniform, we would
6413     // create uses of values corresponding to the lanes we aren't emitting code
6414     // for. This behavior can be changed by allowing getScalarValue to clone
6415     // the lane zero values for uniforms rather than asserting.
6416     for (Use &U : I->operands())
6417       if (auto *J = dyn_cast<Instruction>(U.get()))
6418         if (isUniformAfterVectorization(J, VF))
6419           return false;
6420 
6421     // Otherwise, we can scalarize the instruction.
6422     return true;
6423   };
6424 
6425   // Compute the expected cost discount from scalarizing the entire expression
6426   // feeding the predicated instruction. We currently only consider expressions
6427   // that are single-use instruction chains.
6428   Worklist.push_back(PredInst);
6429   while (!Worklist.empty()) {
6430     Instruction *I = Worklist.pop_back_val();
6431 
6432     // If we've already analyzed the instruction, there's nothing to do.
6433     if (ScalarCosts.find(I) != ScalarCosts.end())
6434       continue;
6435 
6436     // Compute the cost of the vector instruction. Note that this cost already
6437     // includes the scalarization overhead of the predicated instruction.
6438     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6439 
6440     // Compute the cost of the scalarized instruction. This cost is the cost of
6441     // the instruction as if it wasn't if-converted and instead remained in the
6442     // predicated block. We will scale this cost by block probability after
6443     // computing the scalarization overhead.
6444     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6445     InstructionCost ScalarCost =
6446         VF.getKnownMinValue() *
6447         getInstructionCost(I, ElementCount::getFixed(1)).first;
6448 
6449     // Compute the scalarization overhead of needed insertelement instructions
6450     // and phi nodes.
6451     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6452       ScalarCost += TTI.getScalarizationOverhead(
6453           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6454           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6455       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6456       ScalarCost +=
6457           VF.getKnownMinValue() *
6458           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6459     }
6460 
6461     // Compute the scalarization overhead of needed extractelement
6462     // instructions. For each of the instruction's operands, if the operand can
6463     // be scalarized, add it to the worklist; otherwise, account for the
6464     // overhead.
6465     for (Use &U : I->operands())
6466       if (auto *J = dyn_cast<Instruction>(U.get())) {
6467         assert(VectorType::isValidElementType(J->getType()) &&
6468                "Instruction has non-scalar type");
6469         if (canBeScalarized(J))
6470           Worklist.push_back(J);
6471         else if (needsExtract(J, VF)) {
6472           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6473           ScalarCost += TTI.getScalarizationOverhead(
6474               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6475               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6476         }
6477       }
6478 
6479     // Scale the total scalar cost by block probability.
6480     ScalarCost /= getReciprocalPredBlockProb();
6481 
6482     // Compute the discount. A non-negative discount means the vector version
6483     // of the instruction costs more, and scalarizing would be beneficial.
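    // For example (illustrative): if the vector form of a chain costs 10 and
    // its scalarized form, scaled by the block probability, costs 7, the
    // accumulated discount grows by 3 and scalarizing the chain looks
    // profitable.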
6484     Discount += VectorCost - ScalarCost;
6485     ScalarCosts[I] = ScalarCost;
6486   }
6487 
6488   return *Discount.getValue();
6489 }
6490 
6491 LoopVectorizationCostModel::VectorizationCostTy
6492 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6493   VectorizationCostTy Cost;
6494 
6495   // For each block.
6496   for (BasicBlock *BB : TheLoop->blocks()) {
6497     VectorizationCostTy BlockCost;
6498 
6499     // For each instruction in the old loop.
6500     for (Instruction &I : BB->instructionsWithoutDebug()) {
6501       // Skip ignored values.
6502       if (ValuesToIgnore.count(&I) ||
6503           (VF.isVector() && VecValuesToIgnore.count(&I)))
6504         continue;
6505 
6506       VectorizationCostTy C = getInstructionCost(&I, VF);
6507 
6508       // Check if we should override the cost.
6509       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6510         C.first = InstructionCost(ForceTargetInstructionCost);
6511 
6512       BlockCost.first += C.first;
6513       BlockCost.second |= C.second;
6514       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6515                         << " for VF " << VF << " For instruction: " << I
6516                         << '\n');
6517     }
6518 
6519     // If we are vectorizing a predicated block, it will have been
6520     // if-converted. This means that the block's instructions (aside from
6521     // stores and instructions that may divide by zero) will now be
6522     // unconditionally executed. For the scalar case, we may not always execute
6523     // the predicated block, if it is an if-else block. Thus, scale the block's
6524     // cost by the probability of executing it. blockNeedsPredication from
6525     // Legal is used so as to not include all blocks in tail folded loops.
6526     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6527       BlockCost.first /= getReciprocalPredBlockProb();
6528 
6529     Cost.first += BlockCost.first;
6530     Cost.second |= BlockCost.second;
6531   }
6532 
6533   return Cost;
6534 }
6535 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6538 ///
6539 /// This SCEV can be sent to the Target in order to estimate the address
6540 /// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6546 
6547   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6548   if (!Gep)
6549     return nullptr;
6550 
6551   // We are looking for a gep with all loop invariant indices except for one
6552   // which should be an induction variable.
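  // For example (illustrative): a GEP such as
  //   %gep = getelementptr i32, i32* %base, i64 %iv
  // qualifies when %iv is an induction variable, whereas an index that varies
  // in a non-induction, non-invariant way does not.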
6553   auto SE = PSE.getSE();
6554   unsigned NumOperands = Gep->getNumOperands();
6555   for (unsigned i = 1; i < NumOperands; ++i) {
6556     Value *Opd = Gep->getOperand(i);
6557     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6558         !Legal->isInductionVariable(Opd))
6559       return nullptr;
6560   }
6561 
  // Now we know we have a GEP of the form ptr, %inv, %ind, %inv. Return the
  // SCEV for Ptr.
6563   return PSE.getSCEV(Ptr);
6564 }
6565 
6566 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6567   return Legal->hasStride(I->getOperand(0)) ||
6568          Legal->hasStride(I->getOperand(1));
6569 }
6570 
6571 InstructionCost
6572 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6573                                                         ElementCount VF) {
6574   assert(VF.isVector() &&
6575          "Scalarization cost of instruction implies vectorization.");
6576   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6577   Type *ValTy = getMemInstValueType(I);
6578   auto SE = PSE.getSE();
6579 
6580   unsigned AS = getLoadStoreAddressSpace(I);
6581   Value *Ptr = getLoadStorePointerOperand(I);
6582   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6583 
  // Figure out whether the access is strided and get the stride value, if it
  // is known at compile time.
6586   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6587 
6588   // Get the cost of the scalar memory instruction and address computation.
6589   InstructionCost Cost =
6590       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6591 
6592   // Don't pass *I here, since it is scalar but will actually be part of a
6593   // vectorized loop where the user of it is a vectorized instruction.
6594   const Align Alignment = getLoadStoreAlignment(I);
6595   Cost += VF.getKnownMinValue() *
6596           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6597                               AS, TTI::TCK_RecipThroughput);
6598 
6599   // Get the overhead of the extractelement and insertelement instructions
6600   // we might create due to scalarization.
6601   Cost += getScalarizationOverhead(I, VF);
6602 
6603   // If we have a predicated store, it may not be executed for each vector
6604   // lane. Scale the cost by the probability of executing the predicated
6605   // block.
6606   if (isPredicatedInst(I)) {
6607     Cost /= getReciprocalPredBlockProb();
6608 
6609     if (useEmulatedMaskMemRefHack(I))
6610       // Artificially setting to a high enough value to practically disable
6611       // vectorization with such operations.
6612       Cost = 3000000;
6613   }
6614 
6615   return Cost;
6616 }
6617 
6618 InstructionCost
6619 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6620                                                     ElementCount VF) {
6621   Type *ValTy = getMemInstValueType(I);
6622   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6623   Value *Ptr = getLoadStorePointerOperand(I);
6624   unsigned AS = getLoadStoreAddressSpace(I);
6625   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6626   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6627 
6628   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6629          "Stride should be 1 or -1 for consecutive memory access");
6630   const Align Alignment = getLoadStoreAlignment(I);
6631   InstructionCost Cost = 0;
6632   if (Legal->isMaskRequired(I))
6633     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6634                                       CostKind);
6635   else
6636     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6637                                 CostKind, I);
6638 
6639   bool Reverse = ConsecutiveStride < 0;
6640   if (Reverse)
6641     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6642   return Cost;
6643 }
6644 
6645 InstructionCost
6646 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6647                                                 ElementCount VF) {
6648   assert(Legal->isUniformMemOp(*I));
6649 
6650   Type *ValTy = getMemInstValueType(I);
6651   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6652   const Align Alignment = getLoadStoreAlignment(I);
6653   unsigned AS = getLoadStoreAddressSpace(I);
6654   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6655   if (isa<LoadInst>(I)) {
6656     return TTI.getAddressComputationCost(ValTy) +
6657            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6658                                CostKind) +
6659            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6660   }
6661   StoreInst *SI = cast<StoreInst>(I);
6662 
6663   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6664   return TTI.getAddressComputationCost(ValTy) +
6665          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6666                              CostKind) +
6667          (isLoopInvariantStoreValue
6668               ? 0
6669               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6670                                        VF.getKnownMinValue() - 1));
6671 }
6672 
6673 InstructionCost
6674 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6675                                                  ElementCount VF) {
6676   Type *ValTy = getMemInstValueType(I);
6677   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6678   const Align Alignment = getLoadStoreAlignment(I);
6679   const Value *Ptr = getLoadStorePointerOperand(I);
6680 
6681   return TTI.getAddressComputationCost(VectorTy) +
6682          TTI.getGatherScatterOpCost(
6683              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6684              TargetTransformInfo::TCK_RecipThroughput, I);
6685 }
6686 
6687 InstructionCost
6688 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6689                                                    ElementCount VF) {
6690   // TODO: Once we have support for interleaving with scalable vectors
6691   // we can calculate the cost properly here.
6692   if (VF.isScalable())
6693     return InstructionCost::getInvalid();
6694 
6695   Type *ValTy = getMemInstValueType(I);
6696   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6697   unsigned AS = getLoadStoreAddressSpace(I);
6698 
6699   auto Group = getInterleavedAccessGroup(I);
6700   assert(Group && "Fail to get an interleaved access group.");
6701 
6702   unsigned InterleaveFactor = Group->getFactor();
6703   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6704 
6705   // Holds the indices of existing members in an interleaved load group.
6706   // An interleaved store group doesn't need this as it doesn't allow gaps.
6707   SmallVector<unsigned, 4> Indices;
6708   if (isa<LoadInst>(I)) {
6709     for (unsigned i = 0; i < InterleaveFactor; i++)
6710       if (Group->getMember(i))
6711         Indices.push_back(i);
6712   }
6713 
6714   // Calculate the cost of the whole interleaved group.
6715   bool UseMaskForGaps =
6716       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6717   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6718       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6719       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6720 
6721   if (Group->isReverse()) {
6722     // TODO: Add support for reversed masked interleaved access.
6723     assert(!Legal->isMaskRequired(I) &&
6724            "Reverse masked interleaved access not supported.");
6725     Cost += Group->getNumMembers() *
6726             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6727   }
6728   return Cost;
6729 }
6730 
6731 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6732     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6733   // Early exit for no inloop reductions
6734   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6735     return InstructionCost::getInvalid();
6736   auto *VectorTy = cast<VectorType>(Ty);
6737 
  // We are looking for one of the following patterns, choosing the minimal
  // acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we match the
  // mul/ext pattern and compare the cost of the entire pattern against the
  // cost of its components. If the reduction cost is lower, we return it for
  // the reduction instruction and 0 for the other instructions in the pattern.
  // If it is not, we return an invalid cost, specifying that the original cost
  // method should be used.
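  // For example (illustrative): for reduce.add(mul(zext(%a), zext(%b))) we
  // compare the target's cost for an extended multiply-add reduction against
  // the summed costs of the two extends, the multiply and a plain add
  // reduction, and keep whichever is cheaper.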
6750   Instruction *RetI = I;
6751   if ((RetI->getOpcode() == Instruction::SExt ||
6752        RetI->getOpcode() == Instruction::ZExt)) {
6753     if (!RetI->hasOneUser())
6754       return InstructionCost::getInvalid();
6755     RetI = RetI->user_back();
6756   }
6757   if (RetI->getOpcode() == Instruction::Mul &&
6758       RetI->user_back()->getOpcode() == Instruction::Add) {
6759     if (!RetI->hasOneUser())
6760       return InstructionCost::getInvalid();
6761     RetI = RetI->user_back();
6762   }
6763 
  // Test if the found instruction is a reduction, and if not return an invalid
  // cost, indicating that the caller should use the original cost modelling.
6766   if (!InLoopReductionImmediateChains.count(RetI))
6767     return InstructionCost::getInvalid();
6768 
6769   // Find the reduction this chain is a part of and calculate the basic cost of
6770   // the reduction on its own.
6771   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6772   Instruction *ReductionPhi = LastChain;
6773   while (!isa<PHINode>(ReductionPhi))
6774     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6775 
6776   RecurrenceDescriptor RdxDesc =
6777       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6778   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6779                                                      VectorTy, false, CostKind);
6780 
6781   // Get the operand that was not the reduction chain and match it to one of the
6782   // patterns, returning the better cost if it is found.
6783   Instruction *RedOp = RetI->getOperand(1) == LastChain
6784                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6785                            : dyn_cast<Instruction>(RetI->getOperand(1));
6786 
6787   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6788 
6789   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
6790       !TheLoop->isLoopInvariant(RedOp)) {
6791     bool IsUnsigned = isa<ZExtInst>(RedOp);
6792     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6793     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6794         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6795         CostKind);
6796 
6797     unsigned ExtCost =
6798         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6799                              TTI::CastContextHint::None, CostKind, RedOp);
6800     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6801       return I == RetI ? *RedCost.getValue() : 0;
6802   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
6803     Instruction *Mul = RedOp;
6804     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
6805     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
6806     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
6807         Op0->getOpcode() == Op1->getOpcode() &&
6808         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6809         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6810       bool IsUnsigned = isa<ZExtInst>(Op0);
6811       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6812       // reduce(mul(ext, ext))
6813       unsigned ExtCost =
6814           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
6815                                TTI::CastContextHint::None, CostKind, Op0);
6816       unsigned MulCost =
6817           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6818 
6819       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6820           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6821           CostKind);
6822 
6823       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
6824         return I == RetI ? *RedCost.getValue() : 0;
6825     } else {
6826       unsigned MulCost =
6827           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6828 
6829       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6830           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6831           CostKind);
6832 
6833       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6834         return I == RetI ? *RedCost.getValue() : 0;
6835     }
6836   }
6837 
6838   return I == RetI ? BaseCost : InstructionCost::getInvalid();
6839 }
6840 
6841 InstructionCost
6842 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6843                                                      ElementCount VF) {
6844   // Calculate scalar cost only. Vectorization cost should be ready at this
6845   // moment.
6846   if (VF.isScalar()) {
6847     Type *ValTy = getMemInstValueType(I);
6848     const Align Alignment = getLoadStoreAlignment(I);
6849     unsigned AS = getLoadStoreAddressSpace(I);
6850 
6851     return TTI.getAddressComputationCost(ValTy) +
6852            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6853                                TTI::TCK_RecipThroughput, I);
6854   }
6855   return getWideningCost(I, VF);
6856 }
6857 
6858 LoopVectorizationCostModel::VectorizationCostTy
6859 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6860                                                ElementCount VF) {
6861   // If we know that this instruction will remain uniform, check the cost of
6862   // the scalar version.
6863   if (isUniformAfterVectorization(I, VF))
6864     VF = ElementCount::getFixed(1);
6865 
6866   if (VF.isVector() && isProfitableToScalarize(I, VF))
6867     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6868 
6869   // Forced scalars do not have any scalarization overhead.
6870   auto ForcedScalar = ForcedScalars.find(VF);
6871   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6872     auto InstSet = ForcedScalar->second;
6873     if (InstSet.count(I))
6874       return VectorizationCostTy(
6875           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6876            VF.getKnownMinValue()),
6877           false);
6878   }
6879 
6880   Type *VectorTy;
6881   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6882 
6883   bool TypeNotScalarized =
6884       VF.isVector() && VectorTy->isVectorTy() &&
6885       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
6886   return VectorizationCostTy(C, TypeNotScalarized);
6887 }
6888 
6889 InstructionCost
6890 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6891                                                      ElementCount VF) {
6892 
6893   if (VF.isScalable())
6894     return InstructionCost::getInvalid();
6895 
6896   if (VF.isScalar())
6897     return 0;
6898 
6899   InstructionCost Cost = 0;
6900   Type *RetTy = ToVectorTy(I->getType(), VF);
6901   if (!RetTy->isVoidTy() &&
6902       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6903     Cost += TTI.getScalarizationOverhead(
6904         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
6905         true, false);
6906 
6907   // Some targets keep addresses scalar.
6908   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6909     return Cost;
6910 
6911   // Some targets support efficient element stores.
6912   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6913     return Cost;
6914 
6915   // Collect operands to consider.
6916   CallInst *CI = dyn_cast<CallInst>(I);
6917   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6918 
6919   // Skip operands that do not require extraction/scalarization and do not incur
6920   // any overhead.
6921   return Cost + TTI.getOperandsScalarizationOverhead(
6922                     filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
6923 }
6924 
6925 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6926   if (VF.isScalar())
6927     return;
6928   NumPredStores = 0;
6929   for (BasicBlock *BB : TheLoop->blocks()) {
6930     // For each instruction in the old loop.
6931     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6933       if (!Ptr)
6934         continue;
6935 
6936       // TODO: We should generate better code and update the cost model for
6937       // predicated uniform stores. Today they are treated as any other
6938       // predicated store (see added test cases in
6939       // invariant-store-vectorization.ll).
6940       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6941         NumPredStores++;
6942 
6943       if (Legal->isUniformMemOp(I)) {
6944         // TODO: Avoid replicating loads and stores instead of
6945         // relying on instcombine to remove them.
6946         // Load: Scalar load + broadcast
6947         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6948         InstructionCost Cost = getUniformMemOpCost(&I, VF);
6949         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6950         continue;
6951       }
6952 
6953       // We assume that widening is the best solution when possible.
6954       if (memoryInstructionCanBeWidened(&I, VF)) {
6955         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6956         int ConsecutiveStride =
6957                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6958         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6959                "Expected consecutive stride.");
6960         InstWidening Decision =
6961             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6962         setWideningDecision(&I, VF, Decision, Cost);
6963         continue;
6964       }
6965 
6966       // Choose between Interleaving, Gather/Scatter or Scalarization.
6967       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6968       unsigned NumAccesses = 1;
6969       if (isAccessInterleaved(&I)) {
6970         auto Group = getInterleavedAccessGroup(&I);
6971         assert(Group && "Fail to get an interleaved access group.");
6972 
6973         // Make one decision for the whole group.
6974         if (getWideningDecision(&I, VF) != CM_Unknown)
6975           continue;
6976 
6977         NumAccesses = Group->getNumMembers();
6978         if (interleavedAccessCanBeWidened(&I, VF))
6979           InterleaveCost = getInterleaveGroupCost(&I, VF);
6980       }
6981 
6982       InstructionCost GatherScatterCost =
6983           isLegalGatherOrScatter(&I)
6984               ? getGatherScatterCost(&I, VF) * NumAccesses
6985               : InstructionCost::getInvalid();
6986 
6987       InstructionCost ScalarizationCost =
6988           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
6989                            : InstructionCost::getInvalid();
6990 
6991       // Choose better solution for the current VF,
6992       // write down this decision and use it during vectorization.
6993       InstructionCost Cost;
6994       InstWidening Decision;
6995       if (InterleaveCost <= GatherScatterCost &&
6996           InterleaveCost < ScalarizationCost) {
6997         Decision = CM_Interleave;
6998         Cost = InterleaveCost;
6999       } else if (GatherScatterCost < ScalarizationCost) {
7000         Decision = CM_GatherScatter;
7001         Cost = GatherScatterCost;
7002       } else {
7003         assert(!VF.isScalable() &&
7004                "We cannot yet scalarise for scalable vectors");
7005         Decision = CM_Scalarize;
7006         Cost = ScalarizationCost;
7007       }
      // If the instruction belongs to an interleave group, the whole group
7009       // receives the same decision. The whole group receives the cost, but
7010       // the cost will actually be assigned to one instruction.
7011       if (auto Group = getInterleavedAccessGroup(&I))
7012         setWideningDecision(Group, VF, Decision, Cost);
7013       else
7014         setWideningDecision(&I, VF, Decision, Cost);
7015     }
7016   }
7017 
7018   // Make sure that any load of address and any other address computation
7019   // remains scalar unless there is gather/scatter support. This avoids
7020   // inevitable extracts into address registers, and also has the benefit of
7021   // activating LSR more, since that pass can't optimize vectorized
7022   // addresses.
7023   if (TTI.prefersVectorizedAddressing())
7024     return;
7025 
7026   // Start with all scalar pointer uses.
7027   SmallPtrSet<Instruction *, 8> AddrDefs;
7028   for (BasicBlock *BB : TheLoop->blocks())
7029     for (Instruction &I : *BB) {
7030       Instruction *PtrDef =
7031         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7032       if (PtrDef && TheLoop->contains(PtrDef) &&
7033           getWideningDecision(&I, VF) != CM_GatherScatter)
7034         AddrDefs.insert(PtrDef);
7035     }
7036 
7037   // Add all instructions used to generate the addresses.
7038   SmallVector<Instruction *, 4> Worklist;
7039   append_range(Worklist, AddrDefs);
7040   while (!Worklist.empty()) {
7041     Instruction *I = Worklist.pop_back_val();
7042     for (auto &Op : I->operands())
7043       if (auto *InstOp = dyn_cast<Instruction>(Op))
7044         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7045             AddrDefs.insert(InstOp).second)
7046           Worklist.push_back(InstOp);
7047   }
7048 
7049   for (auto *I : AddrDefs) {
7050     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves finding out whether the
      // loaded register is involved in an address computation, it is instead
      // changed here when we know this is the case.
7055       InstWidening Decision = getWideningDecision(I, VF);
7056       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7057         // Scalarize a widened load of address.
7058         setWideningDecision(
7059             I, VF, CM_Scalarize,
7060             (VF.getKnownMinValue() *
7061              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7062       else if (auto Group = getInterleavedAccessGroup(I)) {
7063         // Scalarize an interleave group of address loads.
7064         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7065           if (Instruction *Member = Group->getMember(I))
7066             setWideningDecision(
7067                 Member, VF, CM_Scalarize,
7068                 (VF.getKnownMinValue() *
7069                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7070         }
7071       }
7072     } else
7073       // Make sure I gets scalarized and a cost estimate without
7074       // scalarization overhead.
7075       ForcedScalars[VF].insert(I);
7076   }
7077 }
7078 
7079 InstructionCost
7080 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7081                                                Type *&VectorTy) {
7082   Type *RetTy = I->getType();
7083   if (canTruncateToMinimalBitwidth(I, VF))
7084     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7085   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7086   auto SE = PSE.getSE();
7087   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7088 
7089   // TODO: We need to estimate the cost of intrinsic calls.
7090   switch (I->getOpcode()) {
7091   case Instruction::GetElementPtr:
7092     // We mark this instruction as zero-cost because the cost of GEPs in
7093     // vectorized code depends on whether the corresponding memory instruction
7094     // is scalarized or not. Therefore, we handle GEPs with the memory
7095     // instruction cost.
7096     return 0;
7097   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7101     bool ScalarPredicatedBB = false;
7102     BranchInst *BI = cast<BranchInst>(I);
7103     if (VF.isVector() && BI->isConditional() &&
7104         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7105          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7106       ScalarPredicatedBB = true;
7107 
7108     if (ScalarPredicatedBB) {
7109       // Return cost for branches around scalarized and predicated blocks.
7110       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7111       auto *Vec_i1Ty =
7112           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7113       return (TTI.getScalarizationOverhead(
7114                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7115                   false, true) +
7116               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7117                VF.getKnownMinValue()));
7118     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7119       // The back-edge branch will remain, as will all scalar branches.
7120       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7121     else
7122       // This branch will be eliminated by if-conversion.
7123       return 0;
7124     // Note: We currently assume zero cost for an unconditional branch inside
7125     // a predicated block since it will become a fall-through, although we
7126     // may decide in the future to call TTI for all branches.
7127   }
7128   case Instruction::PHI: {
7129     auto *Phi = cast<PHINode>(I);
7130 
7131     // First-order recurrences are replaced by vector shuffles inside the loop.
7132     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7133     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7134       return TTI.getShuffleCost(
7135           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7136           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7137 
7138     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7139     // converted into select instructions. We require N - 1 selects per phi
7140     // node, where N is the number of incoming values.
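    // For example (illustrative): a phi with three incoming values is lowered
    // to two selects.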
7141     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7142       return (Phi->getNumIncomingValues() - 1) *
7143              TTI.getCmpSelInstrCost(
7144                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7145                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7146                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7147 
7148     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7149   }
7150   case Instruction::UDiv:
7151   case Instruction::SDiv:
7152   case Instruction::URem:
7153   case Instruction::SRem:
7154     // If we have a predicated instruction, it may not be executed for each
7155     // vector lane. Get the scalarization cost and scale this amount by the
7156     // probability of executing the predicated block. If the instruction is not
7157     // predicated, we fall through to the next case.
7158     if (VF.isVector() && isScalarWithPredication(I)) {
7159       InstructionCost Cost = 0;
7160 
7161       // These instructions have a non-void type, so account for the phi nodes
7162       // that we will create. This cost is likely to be zero. The phi node
7163       // cost, if any, should be scaled by the block probability because it
7164       // models a copy at the end of each predicated block.
7165       Cost += VF.getKnownMinValue() *
7166               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7167 
7168       // The cost of the non-predicated instruction.
7169       Cost += VF.getKnownMinValue() *
7170               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7171 
7172       // The cost of insertelement and extractelement instructions needed for
7173       // scalarization.
7174       Cost += getScalarizationOverhead(I, VF);
7175 
7176       // Scale the cost by the probability of executing the predicated blocks.
7177       // This assumes the predicated block for each vector lane is equally
7178       // likely.
7179       return Cost / getReciprocalPredBlockProb();
7180     }
7181     LLVM_FALLTHROUGH;
7182   case Instruction::Add:
7183   case Instruction::FAdd:
7184   case Instruction::Sub:
7185   case Instruction::FSub:
7186   case Instruction::Mul:
7187   case Instruction::FMul:
7188   case Instruction::FDiv:
7189   case Instruction::FRem:
7190   case Instruction::Shl:
7191   case Instruction::LShr:
7192   case Instruction::AShr:
7193   case Instruction::And:
7194   case Instruction::Or:
7195   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7197     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7198       return 0;
7199 
7200     // Detect reduction patterns
7201     InstructionCost RedCost;
7202     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7203             .isValid())
7204       return RedCost;
7205 
7206     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
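    // For example (illustrative), a shift by a uniform constant amount such as
    //   shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
    // is typically cheaper on x86 than a shift by a non-constant per-lane
    // amount; the operand kind/properties passed to TTI below let the target
    // model that difference.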
7208     Value *Op2 = I->getOperand(1);
7209     TargetTransformInfo::OperandValueProperties Op2VP;
7210     TargetTransformInfo::OperandValueKind Op2VK =
7211         TTI.getOperandInfo(Op2, Op2VP);
7212     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7213       Op2VK = TargetTransformInfo::OK_UniformValue;
7214 
7215     SmallVector<const Value *, 4> Operands(I->operand_values());
7216     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7217     return N * TTI.getArithmeticInstrCost(
7218                    I->getOpcode(), VectorTy, CostKind,
7219                    TargetTransformInfo::OK_AnyValue,
7220                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7221   }
7222   case Instruction::FNeg: {
7223     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7224     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7225     return N * TTI.getArithmeticInstrCost(
7226                    I->getOpcode(), VectorTy, CostKind,
7227                    TargetTransformInfo::OK_AnyValue,
7228                    TargetTransformInfo::OK_AnyValue,
7229                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7230                    I->getOperand(0), I);
7231   }
7232   case Instruction::Select: {
7233     SelectInst *SI = cast<SelectInst>(I);
7234     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7235     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7236     Type *CondTy = SI->getCondition()->getType();
7237     if (!ScalarCond)
7238       CondTy = VectorType::get(CondTy, VF);
7239     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7240                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7241   }
7242   case Instruction::ICmp:
7243   case Instruction::FCmp: {
7244     Type *ValTy = I->getOperand(0)->getType();
7245     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7246     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7247       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7248     VectorTy = ToVectorTy(ValTy, VF);
7249     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7250                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7251   }
7252   case Instruction::Store:
7253   case Instruction::Load: {
7254     ElementCount Width = VF;
7255     if (Width.isVector()) {
7256       InstWidening Decision = getWideningDecision(I, Width);
7257       assert(Decision != CM_Unknown &&
7258              "CM decision should be taken at this point");
7259       if (Decision == CM_Scalarize)
7260         Width = ElementCount::getFixed(1);
7261     }
7262     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7263     return getMemoryInstructionCost(I, VF);
7264   }
7265   case Instruction::ZExt:
7266   case Instruction::SExt:
7267   case Instruction::FPToUI:
7268   case Instruction::FPToSI:
7269   case Instruction::FPExt:
7270   case Instruction::PtrToInt:
7271   case Instruction::IntToPtr:
7272   case Instruction::SIToFP:
7273   case Instruction::UIToFP:
7274   case Instruction::Trunc:
7275   case Instruction::FPTrunc:
7276   case Instruction::BitCast: {
7277     // Computes the CastContextHint from a Load/Store instruction.
7278     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7279       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7280              "Expected a load or a store!");
7281 
7282       if (VF.isScalar() || !TheLoop->contains(I))
7283         return TTI::CastContextHint::Normal;
7284 
7285       switch (getWideningDecision(I, VF)) {
7286       case LoopVectorizationCostModel::CM_GatherScatter:
7287         return TTI::CastContextHint::GatherScatter;
7288       case LoopVectorizationCostModel::CM_Interleave:
7289         return TTI::CastContextHint::Interleave;
7290       case LoopVectorizationCostModel::CM_Scalarize:
7291       case LoopVectorizationCostModel::CM_Widen:
7292         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7293                                         : TTI::CastContextHint::Normal;
7294       case LoopVectorizationCostModel::CM_Widen_Reverse:
7295         return TTI::CastContextHint::Reversed;
7296       case LoopVectorizationCostModel::CM_Unknown:
7297         llvm_unreachable("Instr did not go through cost modelling?");
7298       }
7299 
7300       llvm_unreachable("Unhandled case!");
7301     };
7302 
7303     unsigned Opcode = I->getOpcode();
7304     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc/FPTrunc, the context is the only user, which must be a
    // StoreInst.
7306     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7307       if (I->hasOneUse())
7308         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7309           CCH = ComputeCCH(Store);
7310     }
    // For ZExt/SExt/FPExt, the context is the operand, which must be a
    // LoadInst.
7312     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7313              Opcode == Instruction::FPExt) {
7314       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7315         CCH = ComputeCCH(Load);
7316     }
7317 
7318     // We optimize the truncation of induction variables having constant
7319     // integer steps. The cost of these truncations is the same as the scalar
7320     // operation.
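    // For example (illustrative), a truncation of an induction with a constant
    // step, such as
    //   %t = trunc i64 %iv to i32
    // can be folded into a narrower induction, so only the cost of the scalar
    // trunc is charged here.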
7321     if (isOptimizableIVTruncate(I, VF)) {
7322       auto *Trunc = cast<TruncInst>(I);
7323       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7324                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7325     }
7326 
7327     // Detect reduction patterns
7328     InstructionCost RedCost;
7329     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7330             .isValid())
7331       return RedCost;
7332 
7333     Type *SrcScalarTy = I->getOperand(0)->getType();
7334     Type *SrcVecTy =
7335         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7336     if (canTruncateToMinimalBitwidth(I, VF)) {
7337       // This cast is going to be shrunk. This may remove the cast or it might
7338       // turn it into slightly different cast. For example, if MinBW == 16,
7339       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7340       //
7341       // Calculate the modified src and dest types.
7342       Type *MinVecTy = VectorTy;
7343       if (Opcode == Instruction::Trunc) {
7344         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7345         VectorTy =
7346             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7347       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7348         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7349         VectorTy =
7350             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7351       }
7352     }
7353 
7354     unsigned N;
7355     if (isScalarAfterVectorization(I, VF)) {
7356       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7357       N = VF.getKnownMinValue();
7358     } else
7359       N = 1;
7360     return N *
7361            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7362   }
7363   case Instruction::Call: {
7364     bool NeedToScalarize;
7365     CallInst *CI = cast<CallInst>(I);
7366     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7367     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7368       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7369       return std::min(CallCost, IntrinsicCost);
7370     }
7371     return CallCost;
7372   }
7373   case Instruction::ExtractValue:
7374     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7375   default:
7376     // The cost of executing VF copies of the scalar instruction. This opcode
7377     // is unknown. Assume that it is the same as 'mul'.
7378     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7379                                        Instruction::Mul, VectorTy, CostKind) +
7380            getScalarizationOverhead(I, VF);
7381   } // end of switch.
7382 }
7383 
7384 char LoopVectorize::ID = 0;
7385 
7386 static const char lv_name[] = "Loop Vectorization";
7387 
7388 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7389 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7390 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7391 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7392 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7393 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7394 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7395 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7396 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7397 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7398 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7399 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7400 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7401 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7402 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7403 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7404 
7405 namespace llvm {
7406 
7407 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7408 
7409 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7410                               bool VectorizeOnlyWhenForced) {
7411   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7412 }
7413 
7414 } // end namespace llvm
7415 
7416 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7417   // Check if the pointer operand of a load or store instruction is
7418   // consecutive.
7419   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7420     return Legal->isConsecutivePtr(Ptr);
7421   return false;
7422 }
7423 
7424 void LoopVectorizationCostModel::collectValuesToIgnore() {
7425   // Ignore ephemeral values.
7426   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7427 
7428   // Ignore type-promoting instructions we identified during reduction
7429   // detection.
7430   for (auto &Reduction : Legal->getReductionVars()) {
7431     RecurrenceDescriptor &RedDes = Reduction.second;
7432     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7433     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7434   }
7435   // Ignore type-casting instructions we identified during induction
7436   // detection.
7437   for (auto &Induction : Legal->getInductionVars()) {
7438     InductionDescriptor &IndDes = Induction.second;
7439     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7440     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7441   }
7442 }
7443 
7444 void LoopVectorizationCostModel::collectInLoopReductions() {
7445   for (auto &Reduction : Legal->getReductionVars()) {
7446     PHINode *Phi = Reduction.first;
7447     RecurrenceDescriptor &RdxDesc = Reduction.second;
7448 
7449     // We don't collect reductions that are type promoted (yet).
7450     if (RdxDesc.getRecurrenceType() != Phi->getType())
7451       continue;
7452 
7453     // If the target would prefer this reduction to happen "in-loop", then we
7454     // want to record it as such.
7455     unsigned Opcode = RdxDesc.getOpcode();
7456     if (!PreferInLoopReductions &&
7457         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7458                                    TargetTransformInfo::ReductionFlags()))
7459       continue;
7460 
7461     // Check that we can correctly put the reductions into the loop, by
7462     // finding the chain of operations that leads from the phi to the loop
7463     // exit value.
7464     SmallVector<Instruction *, 4> ReductionOperations =
7465         RdxDesc.getReductionOpChain(Phi, TheLoop);
7466     bool InLoop = !ReductionOperations.empty();
7467     if (InLoop) {
7468       InLoopReductionChains[Phi] = ReductionOperations;
7469       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7470       Instruction *LastChain = Phi;
7471       for (auto *I : ReductionOperations) {
7472         InLoopReductionImmediateChains[I] = LastChain;
7473         LastChain = I;
7474       }
7475     }
7476     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7477                       << " reduction for phi: " << *Phi << "\n");
7478   }
7479 }
7480 
7481 // TODO: we could return a pair of values that specify the max VF and
7482 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7484 // doesn't have a cost model that can choose which plan to execute if
7485 // more than one is generated.
7486 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7487                                  LoopVectorizationCostModel &CM) {
7488   unsigned WidestType;
7489   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7490   return WidestVectorRegBits / WidestType;
7491 }
7492 
7493 VectorizationFactor
7494 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7495   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7496   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
7498   // transformations before even evaluating whether vectorization is profitable.
7499   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7500   // the vectorization pipeline.
7501   if (!OrigLoop->isInnermost()) {
7502     // If the user doesn't provide a vectorization factor, determine a
7503     // reasonable one.
7504     if (UserVF.isZero()) {
7505       VF = ElementCount::getFixed(
7506           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
7507       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7508 
7509       // Make sure we have a VF > 1 for stress testing.
7510       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7511         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7512                           << "overriding computed VF.\n");
7513         VF = ElementCount::getFixed(4);
7514       }
7515     }
7516     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7517     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7518            "VF needs to be a power of two");
7519     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7520                       << "VF " << VF << " to build VPlans.\n");
7521     buildVPlans(VF, VF);
7522 
7523     // For VPlan build stress testing, we bail out after VPlan construction.
7524     if (VPlanBuildStressTest)
7525       return VectorizationFactor::Disabled();
7526 
7527     return {VF, 0 /*Cost*/};
7528   }
7529 
7530   LLVM_DEBUG(
7531       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7532                 "VPlan-native path.\n");
7533   return VectorizationFactor::Disabled();
7534 }
7535 
7536 Optional<VectorizationFactor>
7537 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7538   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7539   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
7541     return None;
7542 
7543   // Invalidate interleave groups if all blocks of loop will be predicated.
7544   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7545       !useMaskedInterleavedAccesses(*TTI)) {
7546     LLVM_DEBUG(
7547         dbgs()
7548         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7549            "which requires masked-interleaved support.\n");
7550     if (CM.InterleaveInfo.invalidateGroups())
7551       // Invalidating interleave groups also requires invalidating all decisions
7552       // based on them, which includes widening decisions and uniform and scalar
7553       // values.
7554       CM.invalidateCostModelingDecisions();
7555   }
7556 
7557   ElementCount MaxVF = MaybeMaxVF.getValue();
7558   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7559 
7560   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7561   if (!UserVF.isZero() &&
7562       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
7566     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7567     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7568                       << " VF " << VF << ".\n");
7569     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7570            "VF needs to be a power of two");
7571     // Collect the instructions (and their associated costs) that will be more
7572     // profitable to scalarize.
7573     CM.selectUserVectorizationFactor(VF);
7574     CM.collectInLoopReductions();
7575     buildVPlansWithVPRecipes(VF, VF);
7576     LLVM_DEBUG(printPlans(dbgs()));
7577     return {{VF, 0}};
7578   }
7579 
7580   assert(!MaxVF.isScalable() &&
7581          "Scalable vectors not yet supported beyond this point");
7582 
7583   for (ElementCount VF = ElementCount::getFixed(1);
7584        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7585     // Collect Uniform and Scalar instructions after vectorization with VF.
7586     CM.collectUniformsAndScalars(VF);
7587 
7588     // Collect the instructions (and their associated costs) that will be more
7589     // profitable to scalarize.
7590     if (VF.isVector())
7591       CM.collectInstsToScalarize(VF);
7592   }
7593 
7594   CM.collectInLoopReductions();
7595 
7596   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7597   LLVM_DEBUG(printPlans(dbgs()));
7598   if (MaxVF.isScalar())
7599     return VectorizationFactor::Disabled();
7600 
7601   // Select the optimal vectorization factor.
7602   return CM.selectVectorizationFactor(MaxVF);
7603 }
7604 
7605 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7606   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7607                     << '\n');
7608   BestVF = VF;
7609   BestUF = UF;
7610 
7611   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7612     return !Plan->hasVF(VF);
7613   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7615 }
7616 
7617 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7618                                            DominatorTree *DT) {
7619   // Perform the actual loop transformation.
7620 
7621   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7622   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7623   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7624 
7625   VPTransformState State{
7626       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
7627   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7628   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7629   State.CanonicalIV = ILV.Induction;
7630 
7631   ILV.printDebugTracesAtStart();
7632 
7633   //===------------------------------------------------===//
7634   //
  // Notice: any optimization or new instruction that goes
7636   // into the code below should also be implemented in
7637   // the cost-model.
7638   //
7639   //===------------------------------------------------===//
7640 
7641   // 2. Copy and widen instructions from the old loop into the new loop.
7642   VPlans.front()->execute(&State);
7643 
7644   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7645   //    predication, updating analyses.
7646   ILV.fixVectorizedLoop(State);
7647 
7648   ILV.printDebugTracesAtEnd();
7649 }
7650 
7651 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7652     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7653 
  // We create new control flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // loop terminator.
7657   SmallVector<BasicBlock*> ExitingBlocks;
7658   OrigLoop->getExitingBlocks(ExitingBlocks);
7659   for (auto *BB : ExitingBlocks) {
7660     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7661     if (!Cmp || !Cmp->hasOneUse())
7662       continue;
7663 
7664     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7665     if (!DeadInstructions.insert(Cmp).second)
7666       continue;
7667 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: we could recurse through the operands in general.
7670     for (Value *Op : Cmp->operands()) {
7671       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
7673     }
7674   }
7675 
7676   // We create new "steps" for induction variable updates to which the original
7677   // induction variables map. An original update instruction will be dead if
7678   // all its users except the induction variable are dead.
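  // For example (illustrative), an update such as
  //   %iv.next = add nuw i64 %iv, 1
  // becomes dead once its only remaining user is the %iv phi itself.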
7679   auto *Latch = OrigLoop->getLoopLatch();
7680   for (auto &Induction : Legal->getInductionVars()) {
7681     PHINode *Ind = Induction.first;
7682     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7683 
7684     // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7686     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7687       continue;
7688 
7689     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7690           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7691         }))
7692       DeadInstructions.insert(IndUpdate);
7693 
7694     // We record as "Dead" also the type-casting instructions we had identified
7695     // during induction analysis. We don't need any handling for them in the
7696     // vectorized loop because we have proven that, under a proper runtime
7697     // test guarding the vectorized loop, the value of the phi, and the casted
7698     // value of the phi, are the same. The last instruction in this casting chain
7699     // will get its scalar/vector/widened def from the scalar/vector/widened def
7700     // of the respective phi node. Any other casts in the induction def-use chain
7701     // have no other uses outside the phi update chain, and will be ignored.
7702     InductionDescriptor &IndDes = Induction.second;
7703     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7704     DeadInstructions.insert(Casts.begin(), Casts.end());
7705   }
7706 }
7707 
7708 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7709 
7710 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7711 
7712 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7713                                         Instruction::BinaryOps BinOp) {
7714   // When unrolling and the VF is 1, we only need to add a simple scalar.
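  // For example (illustrative), with Val = %induction, Step = 1 and
  // StartIdx = 2, this effectively emits 'add i64 %induction, 2' for the third
  // unrolled copy.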
7715   Type *Ty = Val->getType();
7716   assert(!Ty->isVectorTy() && "Val must be a scalar");
7717 
7718   if (Ty->isFloatingPointTy()) {
7719     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7720 
7721     // Floating point operations had to be 'fast' to enable the unrolling.
7722     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7723     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7724   }
7725   Constant *C = ConstantInt::get(Ty, StartIdx);
7726   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7727 }
7728 
7729 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7730   SmallVector<Metadata *, 4> MDs;
7731   // Reserve first location for self reference to the LoopID metadata node.
7732   MDs.push_back(nullptr);
7733   bool IsUnrollMetadata = false;
7734   MDNode *LoopID = L->getLoopID();
7735   if (LoopID) {
7736     // First find existing loop unrolling disable metadata.
7737     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7738       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7739       if (MD) {
7740         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7741         IsUnrollMetadata =
7742             S && S->getString().startswith("llvm.loop.unroll.disable");
7743       }
7744       MDs.push_back(LoopID->getOperand(i));
7745     }
7746   }
7747 
7748   if (!IsUnrollMetadata) {
7749     // Add runtime unroll disable metadata.
7750     LLVMContext &Context = L->getHeader()->getContext();
7751     SmallVector<Metadata *, 1> DisableOperands;
7752     DisableOperands.push_back(
7753         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7754     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7755     MDs.push_back(DisableNode);
7756     MDNode *NewLoopID = MDNode::get(Context, MDs);
7757     // Set operand 0 to refer to the loop id itself.
7758     NewLoopID->replaceOperandWith(0, NewLoopID);
7759     L->setLoopID(NewLoopID);
7760   }
7761 }
7762 
7763 //===--------------------------------------------------------------------===//
7764 // EpilogueVectorizerMainLoop
7765 //===--------------------------------------------------------------------===//
7766 
7767 /// This function is partially responsible for generating the control flow
7768 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7769 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7770   MDNode *OrigLoopID = OrigLoop->getLoopID();
7771   Loop *Lp = createVectorLoopSkeleton("");
7772 
7773   // Generate the code to check the minimum iteration count of the vector
7774   // epilogue (see below).
7775   EPI.EpilogueIterationCountCheck =
7776       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7777   EPI.EpilogueIterationCountCheck->setName("iter.check");
7778 
7779   // Generate the code to check any assumptions that we've made for SCEV
7780   // expressions.
7781   BasicBlock *SavedPreHeader = LoopVectorPreHeader;
7782   emitSCEVChecks(Lp, LoopScalarPreHeader);
7783 
  // If a safety check was generated, save it.
7785   if (SavedPreHeader != LoopVectorPreHeader)
7786     EPI.SCEVSafetyCheck = SavedPreHeader;
7787 
7788   // Generate the code that checks at runtime if arrays overlap. We put the
7789   // checks into a separate block to make the more common case of few elements
7790   // faster.
7791   SavedPreHeader = LoopVectorPreHeader;
7792   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7793 
  // If a safety check was generated, save/overwrite it.
7795   if (SavedPreHeader != LoopVectorPreHeader)
7796     EPI.MemSafetyCheck = SavedPreHeader;
7797 
7798   // Generate the iteration count check for the main loop, *after* the check
7799   // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path length for
  // the main loop is compensated for by the gain from vectorizing the larger
7802   // trip count. Note: the branch will get updated later on when we vectorize
7803   // the epilogue.
7804   EPI.MainLoopIterationCountCheck =
7805       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7806 
7807   // Generate the induction variable.
7808   OldInduction = Legal->getPrimaryInduction();
7809   Type *IdxTy = Legal->getWidestInductionType();
7810   Value *StartIdx = ConstantInt::get(IdxTy, 0);
7811   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7812   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7813   EPI.VectorTripCount = CountRoundDown;
7814   Induction =
7815       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7816                               getDebugLocFromInstOrOperands(OldInduction));
7817 
7818   // Skip induction resume value creation here because they will be created in
7819   // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from the
7821   // original loop.
7822 
7823   return completeLoopSkeleton(Lp, OrigLoopID);
7824 }
7825 
7826 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7827   LLVM_DEBUG({
7828     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7829            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
7830            << ", Main Loop UF:" << EPI.MainLoopUF
7831            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
7832            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7833   });
7834 }
7835 
7836 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7837   DEBUG_WITH_TYPE(VerboseDebug, {
7838     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
7839   });
7840 }
7841 
7842 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
7843     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
7844   assert(L && "Expected valid Loop.");
7845   assert(Bypass && "Expected valid bypass basic block.");
7846   unsigned VFactor =
7847       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
7848   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7849   Value *Count = getOrCreateTripCount(L);
7850   // Reuse existing vector loop preheader for TC checks.
7851   // Note that new preheader block is generated for vector loop.
7852   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7853   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7854 
7855   // Generate code to check if the loop's trip count is less than VF * UF of the
7856   // main vector loop.
7857   auto P =
7858       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7859 
7860   Value *CheckMinIters = Builder.CreateICmp(
7861       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
7862       "min.iters.check");
7863 
7864   if (!ForEpilogue)
7865     TCCheckBlock->setName("vector.main.loop.iter.check");
7866 
7867   // Create new preheader for vector loop.
7868   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7869                                    DT, LI, nullptr, "vector.ph");
7870 
7871   if (ForEpilogue) {
7872     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7873                                  DT->getNode(Bypass)->getIDom()) &&
7874            "TC check is expected to dominate Bypass");
7875 
7876     // Update dominator for Bypass & LoopExit.
7877     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7878     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7879 
7880     LoopBypassBlocks.push_back(TCCheckBlock);
7881 
7882     // Save the trip count so we don't have to regenerate it in the
7883     // vec.epilog.iter.check. This is safe to do because the trip count
7884     // generated here dominates the vector epilog iter check.
7885     EPI.TripCount = Count;
7886   }
7887 
7888   ReplaceInstWithInst(
7889       TCCheckBlock->getTerminator(),
7890       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7891 
7892   return TCCheckBlock;
7893 }
7894 
7895 //===--------------------------------------------------------------------===//
7896 // EpilogueVectorizerEpilogueLoop
7897 //===--------------------------------------------------------------------===//
7898 
7899 /// This function is partially responsible for generating the control flow
7900 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7901 BasicBlock *
7902 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7903   MDNode *OrigLoopID = OrigLoop->getLoopID();
7904   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
7905 
7906   // Now, compare the remaining count and if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
7908   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7909   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7910   LoopVectorPreHeader =
7911       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7912                  LI, nullptr, "vec.epilog.ph");
7913   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
7914                                           VecEpilogueIterationCountCheck);
7915 
7916   // Adjust the control flow taking the state info from the main loop
7917   // vectorization into account.
7918   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7919          "expected this to be saved from the previous pass.");
7920   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7921       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
7922 
7923   DT->changeImmediateDominator(LoopVectorPreHeader,
7924                                EPI.MainLoopIterationCountCheck);
7925 
7926   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7927       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7928 
7929   if (EPI.SCEVSafetyCheck)
7930     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
7931         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7932   if (EPI.MemSafetyCheck)
7933     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
7934         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7935 
7936   DT->changeImmediateDominator(
7937       VecEpilogueIterationCountCheck,
7938       VecEpilogueIterationCountCheck->getSinglePredecessor());
7939 
7940   DT->changeImmediateDominator(LoopScalarPreHeader,
7941                                EPI.EpilogueIterationCountCheck);
7942   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
7943 
7944   // Keep track of bypass blocks, as they feed start values to the induction
7945   // phis in the scalar loop preheader.
7946   if (EPI.SCEVSafetyCheck)
7947     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
7948   if (EPI.MemSafetyCheck)
7949     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
7950   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
7951 
7952   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
7954   Type *IdxTy = Legal->getWidestInductionType();
7955   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7956                                          LoopVectorPreHeader->getFirstNonPHI());
7957   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7958   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7959                            EPI.MainLoopIterationCountCheck);
7960 
7961   // Generate the induction variable.
7962   OldInduction = Legal->getPrimaryInduction();
7963   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7964   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7965   Value *StartIdx = EPResumeVal;
7966   Induction =
7967       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7968                               getDebugLocFromInstOrOperands(OldInduction));
7969 
7970   // Generate induction resume values. These variables save the new starting
7971   // indexes for the scalar loop. They are used to test if there are any tail
7972   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
7975   // the trip count of the main vector loop, hence passing the AdditionalBypass
7976   // argument.
7977   createInductionResumeValues(Lp, CountRoundDown,
7978                               {VecEpilogueIterationCountCheck,
7979                                EPI.VectorTripCount} /* AdditionalBypass */);
7980 
7981   AddRuntimeUnrollDisableMetaData(Lp);
7982   return completeLoopSkeleton(Lp, OrigLoopID);
7983 }
7984 
7985 BasicBlock *
7986 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7987     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
7988 
7989   assert(EPI.TripCount &&
7990          "Expected trip count to have been safed in the first pass.");
7991   assert(
7992       (!isa<Instruction>(EPI.TripCount) ||
7993        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7994       "saved trip count does not dominate insertion point.");
7995   Value *TC = EPI.TripCount;
7996   IRBuilder<> Builder(Insert->getTerminator());
7997   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7998 
7999   // Generate code to check if the loop's trip count is less than VF * UF of the
8000   // vector epilogue loop.
8001   auto P =
8002       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8003 
8004   Value *CheckMinIters = Builder.CreateICmp(
8005       P, Count,
8006       ConstantInt::get(Count->getType(),
8007                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8008       "min.epilog.iters.check");
8009 
8010   ReplaceInstWithInst(
8011       Insert->getTerminator(),
8012       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8013 
8014   LoopBypassBlocks.push_back(Insert);
8015   return Insert;
8016 }
8017 
8018 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8019   LLVM_DEBUG({
8020     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8021            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8022            << ", Main Loop UF:" << EPI.MainLoopUF
8023            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8024            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8025   });
8026 }
8027 
8028 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8029   DEBUG_WITH_TYPE(VerboseDebug, {
8030     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8031   });
8032 }
8033 
8034 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8035     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8036   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8037   bool PredicateAtRangeStart = Predicate(Range.Start);
8038 
8039   for (ElementCount TmpVF = Range.Start * 2;
8040        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8041     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8042       Range.End = TmpVF;
8043       break;
8044     }
8045 
8046   return PredicateAtRangeStart;
8047 }
8048 
8049 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8050 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8051 /// of VF's starting at a given VF and extending it as much as possible. Each
8052 /// vectorization decision can potentially shorten this sub-range during
8053 /// buildVPlan().
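/// For example (illustrative), with MinVF = 1 and MaxVF = 8 this might build
/// plans for the sub-ranges {1}, {2, 4} and {8}, depending on where the per-VF
/// widening decisions change.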
8054 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8055                                            ElementCount MaxVF) {
8056   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8057   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8058     VFRange SubRange = {VF, MaxVFPlusOne};
8059     VPlans.push_back(buildVPlan(SubRange));
8060     VF = SubRange.End;
8061   }
8062 }
8063 
8064 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8065                                          VPlanPtr &Plan) {
8066   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8067 
8068   // Look for cached value.
8069   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8070   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8071   if (ECEntryIt != EdgeMaskCache.end())
8072     return ECEntryIt->second;
8073 
8074   VPValue *SrcMask = createBlockInMask(Src, Plan);
8075 
8076   // The terminator has to be a branch inst!
8077   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8078   assert(BI && "Unexpected terminator found");
8079 
8080   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8081     return EdgeMaskCache[Edge] = SrcMask;
8082 
8083   // If source is an exiting block, we know the exit edge is dynamically dead
8084   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8085   // adding uses of an otherwise potentially dead instruction.
8086   if (OrigLoop->isLoopExiting(Src))
8087     return EdgeMaskCache[Edge] = SrcMask;
8088 
8089   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8090   assert(EdgeMask && "No Edge Mask found for condition");
8091 
8092   if (BI->getSuccessor(0) != Dst)
8093     EdgeMask = Builder.createNot(EdgeMask);
8094 
8095   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8096     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8097     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8098     // The select version does not introduce new UB if SrcMask is false and
8099     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8100     VPValue *False = Plan->getOrAddVPValue(
8101         ConstantInt::getFalse(BI->getCondition()->getType()));
8102     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8103   }
8104 
8105   return EdgeMaskCache[Edge] = EdgeMask;
8106 }
8107 
8108 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8109   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8110 
8111   // Look for cached value.
8112   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8113   if (BCEntryIt != BlockMaskCache.end())
8114     return BCEntryIt->second;
8115 
8116   // All-one mask is modelled as no-mask following the convention for masked
8117   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8118   VPValue *BlockMask = nullptr;
8119 
8120   if (OrigLoop->getHeader() == BB) {
8121     if (!CM.blockNeedsPredication(BB))
8122       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8123 
8124     // Create the block in mask as the first non-phi instruction in the block.
8125     VPBuilder::InsertPointGuard Guard(Builder);
8126     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8127     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8128 
8129     // Introduce the early-exit compare IV <= BTC to form header block mask.
8130     // This is used instead of IV < TC because TC may wrap, unlike BTC.
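    // For example (illustrative), an i8 IV for a loop with 256 iterations has
    // TC == 0 after wrapping, while BTC == 255 is still representable, so
    // IV <= BTC remains a correct header mask.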
8131     // Start by constructing the desired canonical IV.
8132     VPValue *IV = nullptr;
8133     if (Legal->getPrimaryInduction())
8134       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8135     else {
8136       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8137       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8138       IV = IVRecipe->getVPValue();
8139     }
8140     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8141     bool TailFolded = !CM.isScalarEpilogueAllowed();
8142 
8143     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8144       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8145       // as a second argument, we only pass the IV here and extract the
8146       // tripcount from the transform state where codegen of the VP instructions
      // happens.
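      // Conceptually the mask later materializes as something like
      //   %mask = call <VF x i1> @llvm.get.active.lane.mask(%IV, %TC)
      // (illustrative; the exact form is decided during VPInstruction codegen).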
8148       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8149     } else {
8150       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8151     }
8152     return BlockMaskCache[BB] = BlockMask;
8153   }
8154 
8155   // This is the block mask. We OR all incoming edges.
8156   for (auto *Predecessor : predecessors(BB)) {
8157     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8158     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8159       return BlockMaskCache[BB] = EdgeMask;
8160 
8161     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8162       BlockMask = EdgeMask;
8163       continue;
8164     }
8165 
8166     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8167   }
8168 
8169   return BlockMaskCache[BB] = BlockMask;
8170 }
8171 
8172 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
8173                                                 VPlanPtr &Plan) {
8174   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8175          "Must be called with either a load or store");
8176 
8177   auto willWiden = [&](ElementCount VF) -> bool {
8178     if (VF.isScalar())
8179       return false;
8180     LoopVectorizationCostModel::InstWidening Decision =
8181         CM.getWideningDecision(I, VF);
8182     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8183            "CM decision should be taken at this point.");
8184     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8185       return true;
8186     if (CM.isScalarAfterVectorization(I, VF) ||
8187         CM.isProfitableToScalarize(I, VF))
8188       return false;
8189     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8190   };
8191 
8192   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8193     return nullptr;
8194 
8195   VPValue *Mask = nullptr;
8196   if (Legal->isMaskRequired(I))
8197     Mask = createBlockInMask(I->getParent(), Plan);
8198 
8199   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
8200   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8201     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
8202 
8203   StoreInst *Store = cast<StoreInst>(I);
8204   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
8205   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
8206 }
8207 
8208 VPWidenIntOrFpInductionRecipe *
8209 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
8210   // Check if this is an integer or fp induction. If so, build the recipe that
8211   // produces its scalar and vector values.
8212   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8213   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8214       II.getKind() == InductionDescriptor::IK_FpInduction) {
8215     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8216     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8217     return new VPWidenIntOrFpInductionRecipe(
8218         Phi, Start, Casts.empty() ? nullptr : Casts.front());
8219   }
8220 
8221   return nullptr;
8222 }
8223 
8224 VPWidenIntOrFpInductionRecipe *
8225 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range,
8226                                                 VPlan &Plan) const {
8227   // Optimize the special case where the source is a constant integer
8228   // induction variable. Notice that we can only optimize the 'trunc' case
8229   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8230   // (c) other casts depend on pointer size.
8231 
8232   // Determine whether \p K is a truncation based on an induction variable that
8233   // can be optimized.
8234   auto isOptimizableIVTruncate =
8235       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8236     return [=](ElementCount VF) -> bool {
8237       return CM.isOptimizableIVTruncate(K, VF);
8238     };
8239   };
8240 
8241   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8242           isOptimizableIVTruncate(I), Range)) {
8243 
8244     InductionDescriptor II =
8245         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8246     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8247     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8248                                              Start, nullptr, I);
8249   }
8250   return nullptr;
8251 }
8252 
8253 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8254   // We know that all PHIs in non-header blocks are converted into selects, so
8255   // we don't have to worry about the insertion order and we can just use the
8256   // builder. At this point we generate the predication tree. There may be
8257   // duplications since this is a simple recursive scan, but future
8258   // optimizations will clean it up.
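  // The recipe's operands are the incoming values interleaved with their edge
  // masks, i.e. (val0, mask0, val1, mask1, ...); a lone incoming value whose
  // edge mask is all-one carries no mask operand.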
8259 
8260   SmallVector<VPValue *, 2> Operands;
8261   unsigned NumIncoming = Phi->getNumIncomingValues();
8262   for (unsigned In = 0; In < NumIncoming; In++) {
8263     VPValue *EdgeMask =
8264       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8265     assert((EdgeMask || NumIncoming == 1) &&
8266            "Multiple predecessors with one having a full mask");
8267     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8268     if (EdgeMask)
8269       Operands.push_back(EdgeMask);
8270   }
8271   return new VPBlendRecipe(Phi, Operands);
8272 }
8273 
8274 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8275                                                    VPlan &Plan) const {
8276 
8277   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8278       [this, CI](ElementCount VF) {
8279         return CM.isScalarWithPredication(CI, VF);
8280       },
8281       Range);
8282 
8283   if (IsPredicated)
8284     return nullptr;
8285 
8286   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8287   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8288              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8289              ID == Intrinsic::pseudoprobe ||
8290              ID == Intrinsic::experimental_noalias_scope_decl))
8291     return nullptr;
8292 
8293   auto willWiden = [&](ElementCount VF) -> bool {
8294     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8295     // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a usual call for the
    // vectorized version of the instruction.
    // Is it beneficial to perform the intrinsic call compared to a lib call?
8299     bool NeedToScalarize = false;
8300     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8301     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8302     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8303     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8304            "Cannot have invalid costs while widening");
8305     return UseVectorIntrinsic || !NeedToScalarize;
8306   };
8307 
8308   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8309     return nullptr;
8310 
8311   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8312 }
8313 
8314 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8315   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8316          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
8319   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8320     return CM.isScalarAfterVectorization(I, VF) ||
8321            CM.isProfitableToScalarize(I, VF) ||
8322            CM.isScalarWithPredication(I, VF);
8323   };
8324   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8325                                                              Range);
8326 }
8327 
8328 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8329   auto IsVectorizableOpcode = [](unsigned Opcode) {
8330     switch (Opcode) {
8331     case Instruction::Add:
8332     case Instruction::And:
8333     case Instruction::AShr:
8334     case Instruction::BitCast:
8335     case Instruction::FAdd:
8336     case Instruction::FCmp:
8337     case Instruction::FDiv:
8338     case Instruction::FMul:
8339     case Instruction::FNeg:
8340     case Instruction::FPExt:
8341     case Instruction::FPToSI:
8342     case Instruction::FPToUI:
8343     case Instruction::FPTrunc:
8344     case Instruction::FRem:
8345     case Instruction::FSub:
8346     case Instruction::ICmp:
8347     case Instruction::IntToPtr:
8348     case Instruction::LShr:
8349     case Instruction::Mul:
8350     case Instruction::Or:
8351     case Instruction::PtrToInt:
8352     case Instruction::SDiv:
8353     case Instruction::Select:
8354     case Instruction::SExt:
8355     case Instruction::Shl:
8356     case Instruction::SIToFP:
8357     case Instruction::SRem:
8358     case Instruction::Sub:
8359     case Instruction::Trunc:
8360     case Instruction::UDiv:
8361     case Instruction::UIToFP:
8362     case Instruction::URem:
8363     case Instruction::Xor:
8364     case Instruction::ZExt:
8365       return true;
8366     }
8367     return false;
8368   };
8369 
8370   if (!IsVectorizableOpcode(I->getOpcode()))
8371     return nullptr;
8372 
8373   // Success: widen this instruction.
8374   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
8375 }
8376 
8377 VPBasicBlock *VPRecipeBuilder::handleReplication(
8378     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8379     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8380     VPlanPtr &Plan) {
8381   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8382       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8383       Range);
8384 
8385   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8386       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8387       Range);
8388 
8389   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8390                                        IsUniform, IsPredicated);
8391   setRecipe(I, Recipe);
8392   Plan->addVPValue(I, Recipe);
8393 
8394   // Find if I uses a predicated instruction. If so, it will use its scalar
8395   // value. Avoid hoisting the insert-element which packs the scalar value into
8396   // a vector value, as that happens iff all users use the vector value.
8397   for (auto &Op : I->operands())
8398     if (auto *PredInst = dyn_cast<Instruction>(Op))
8399       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8400         PredInst2Recipe[PredInst]->setAlsoPack(false);
8401 
  // Finalize the recipe for Instr, handling the non-predicated case first.
8403   if (!IsPredicated) {
8404     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8405     VPBB->appendRecipe(Recipe);
8406     return VPBB;
8407   }
8408   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8409   assert(VPBB->getSuccessors().empty() &&
8410          "VPBB has successors when handling predicated replication.");
8411   // Record predicated instructions for above packing optimizations.
8412   PredInst2Recipe[I] = Recipe;
8413   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8414   VPBlockUtils::insertBlockAfter(Region, VPBB);
8415   auto *RegSucc = new VPBasicBlock();
8416   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8417   return RegSucc;
8418 }
8419 
8420 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8421                                                       VPRecipeBase *PredRecipe,
8422                                                       VPlanPtr &Plan) {
8423   // Instructions marked for predication are replicated and placed under an
8424   // if-then construct to prevent side-effects.
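  // The resulting single-entry single-exit region has the shape (illustrative):
  //   pred.<opcode>.entry    - branch-on-mask recipe
  //   pred.<opcode>.if       - the replicated (predicated) instruction
  //   pred.<opcode>.continue - optional phi merging the predicated value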
8425 
8426   // Generate recipes to compute the block mask for this region.
8427   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8428 
8429   // Build the triangular if-then region.
8430   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8431   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8432   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8433   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8434   auto *PHIRecipe = Instr->getType()->isVoidTy()
8435                         ? nullptr
8436                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8437   if (PHIRecipe) {
8438     Plan->removeVPValueFor(Instr);
8439     Plan->addVPValue(Instr, PHIRecipe);
8440   }
8441   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8442   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
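  // The region spans Entry -> {Pred, Exit}; the trailing 'true' below marks it
  // as a replicator region, i.e. one whose contents are executed once per
  // lane rather than once per vector iteration.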
8443   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8444 
8445   // Note: first set Entry as region entry and then connect successors starting
8446   // from it in order, to propagate the "parent" of each VPBasicBlock.
8447   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8448   VPBlockUtils::connectBlocks(Pred, Exit);
8449 
8450   return Region;
8451 }
8452 
8453 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8454                                                       VFRange &Range,
8455                                                       VPlanPtr &Plan) {
8456   // First, check for specific widening recipes that deal with calls, memory
8457   // operations, inductions and Phi nodes.
8458   if (auto *CI = dyn_cast<CallInst>(Instr))
8459     return tryToWidenCall(CI, Range, *Plan);
8460 
8461   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8462     return tryToWidenMemory(Instr, Range, Plan);
8463 
8464   VPRecipeBase *Recipe;
8465   if (auto Phi = dyn_cast<PHINode>(Instr)) {
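    // Phis outside the loop header stem from if-converted control flow and are
    // handled as blends: a chain of selects over the incoming edge masks.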
8466     if (Phi->getParent() != OrigLoop->getHeader())
8467       return tryToBlend(Phi, Plan);
8468     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8469       return Recipe;
8470 
8471     if (Legal->isReductionVariable(Phi)) {
8472       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8473       VPValue *StartV =
8474           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8475       return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8476     }
8477 
8478     return new VPWidenPHIRecipe(Phi);
8479   }
8480 
8481   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8482                                     cast<TruncInst>(Instr), Range, *Plan)))
8483     return Recipe;
8484 
8485   if (!shouldWiden(Instr, Range))
8486     return nullptr;
8487 
8488   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8489     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8490                                 OrigLoop);
8491 
8492   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8493     bool InvariantCond =
8494         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8495     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8496                                    InvariantCond);
8497   }
8498 
8499   return tryToWiden(Instr, *Plan);
8500 }
8501 
8502 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8503                                                         ElementCount MaxVF) {
8504   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8505 
8506   // Collect instructions from the original loop that will become trivially dead
8507   // in the vectorized loop. We don't need to vectorize these instructions. For
8508   // example, original induction update instructions can become dead because we
8509   // separately emit induction "steps" when generating code for the new loop.
8510   // Similarly, we create a new latch condition when setting up the structure
8511   // of the new loop, so the old one can become dead.
8512   SmallPtrSet<Instruction *, 4> DeadInstructions;
8513   collectTriviallyDeadInstructions(DeadInstructions);
8514 
8515   // Add assume instructions we need to drop to DeadInstructions, to prevent
8516   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8518   // control flow is preserved, we should keep them.
8519   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8520   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8521 
8522   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8523   // Dead instructions do not need sinking. Remove them from SinkAfter.
8524   for (Instruction *I : DeadInstructions)
8525     SinkAfter.erase(I);
8526 
8527   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
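  // Each iteration builds one VPlan covering a sub-range of VFs for which the
  // recipe decisions are identical; e.g. with MinVF=2 and MaxVF=8 this may
  // yield a single plan for VF={2,4,8} or several plans for smaller
  // sub-ranges, depending on how buildVPlanWithVPRecipes clamps SubRange.End.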
8528   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8529     VFRange SubRange = {VF, MaxVFPlusOne};
8530     VPlans.push_back(
8531         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8532     VF = SubRange.End;
8533   }
8534 }
8535 
8536 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8537     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8538     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8539 
8540   // Hold a mapping from predicated instructions to their recipes, in order to
8541   // fix their AlsoPack behavior if a user is determined to replicate and use a
8542   // scalar instead of vector value.
8543   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8544 
8545   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8546 
8547   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8548 
8549   // ---------------------------------------------------------------------------
8550   // Pre-construction: record ingredients whose recipes we'll need to further
8551   // process after constructing the initial VPlan.
8552   // ---------------------------------------------------------------------------
8553 
8554   // Mark instructions we'll need to sink later and their targets as
8555   // ingredients whose recipe we'll need to record.
8556   for (auto &Entry : SinkAfter) {
8557     RecipeBuilder.recordRecipeOf(Entry.first);
8558     RecipeBuilder.recordRecipeOf(Entry.second);
8559   }
8560   for (auto &Reduction : CM.getInLoopReductionChains()) {
8561     PHINode *Phi = Reduction.first;
8562     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8563     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8564 
8565     RecipeBuilder.recordRecipeOf(Phi);
8566     for (auto &R : ReductionOperations) {
8567       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8569       // need to record the ICmp recipe, so it can be removed later.
8570       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8571         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8572     }
8573   }
8574 
8575   // For each interleave group which is relevant for this (possibly trimmed)
8576   // Range, add it to the set of groups to be later applied to the VPlan and add
8577   // placeholders for its members' Recipes which we'll be replacing with a
8578   // single VPInterleaveRecipe.
8579   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8580     auto applyIG = [IG, this](ElementCount VF) -> bool {
8581       return (VF.isVector() && // Query is illegal for VF == 1
8582               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8583                   LoopVectorizationCostModel::CM_Interleave);
8584     };
8585     if (!getDecisionAndClampRange(applyIG, Range))
8586       continue;
8587     InterleaveGroups.insert(IG);
8588     for (unsigned i = 0; i < IG->getFactor(); i++)
8589       if (Instruction *Member = IG->getMember(i))
8590         RecipeBuilder.recordRecipeOf(Member);
  }
8592 
8593   // ---------------------------------------------------------------------------
8594   // Build initial VPlan: Scan the body of the loop in a topological order to
8595   // visit each basic block after having visited its predecessor basic blocks.
8596   // ---------------------------------------------------------------------------
8597 
8598   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8599   auto Plan = std::make_unique<VPlan>();
8600   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8601   Plan->setEntry(VPBB);
8602 
8603   // Scan the body of the loop in a topological order to visit each basic block
8604   // after having visited its predecessor basic blocks.
8605   LoopBlocksDFS DFS(OrigLoop);
8606   DFS.perform(LI);
8607 
8608   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8609     // Relevant instructions from basic block BB will be grouped into VPRecipe
8610     // ingredients and fill a new VPBasicBlock.
8611     unsigned VPBBsForBB = 0;
8612     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8613     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8614     VPBB = FirstVPBBForBB;
8615     Builder.setInsertPoint(VPBB);
8616 
8617     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8619     for (Instruction &I : BB->instructionsWithoutDebug()) {
8620       Instruction *Instr = &I;
8621 
8622       // First filter out irrelevant instructions, to ensure no recipes are
8623       // built for them.
8624       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8625         continue;
8626 
8627       if (auto Recipe =
8628               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8629         for (auto *Def : Recipe->definedValues()) {
8630           auto *UV = Def->getUnderlyingValue();
8631           Plan->addVPValue(UV, Def);
8632         }
8633 
8634         RecipeBuilder.setRecipe(Instr, Recipe);
8635         VPBB->appendRecipe(Recipe);
8636         continue;
8637       }
8638 
      // Otherwise, if all widening options failed, the instruction is to be
8640       // replicated. This may create a successor for VPBB.
8641       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8642           Instr, Range, VPBB, PredInst2Recipe, Plan);
8643       if (NextVPBB != VPBB) {
8644         VPBB = NextVPBB;
8645         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8646                                     : "");
8647       }
8648     }
8649   }
8650 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
8654   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8655   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8656   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8657   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8658   delete PreEntry;
8659 
8660   // ---------------------------------------------------------------------------
8661   // Transform initial VPlan: Apply previously taken decisions, in order, to
8662   // bring the VPlan to its final state.
8663   // ---------------------------------------------------------------------------
8664 
8665   // Apply Sink-After legal constraints.
8666   for (auto &Entry : SinkAfter) {
8667     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8668     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8669     // If the target is in a replication region, make sure to move Sink to the
8670     // block after it, not into the replication region itself.
8671     if (auto *Region =
8672             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8673       if (Region->isReplicator()) {
8674         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8675         VPBasicBlock *NextBlock =
8676             cast<VPBasicBlock>(Region->getSuccessors().front());
8677         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8678         continue;
8679       }
8680     }
8681     Sink->moveAfter(Target);
8682   }
8683 
8684   // Interleave memory: for each Interleave Group we marked earlier as relevant
8685   // for this VPlan, replace the Recipes widening its memory instructions with a
8686   // single VPInterleaveRecipe at its insertion point.
8687   for (auto IG : InterleaveGroups) {
8688     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8689         RecipeBuilder.getRecipe(IG->getInsertPos()));
8690     SmallVector<VPValue *, 4> StoredValues;
8691     for (unsigned i = 0; i < IG->getFactor(); ++i)
8692       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8693         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8694 
8695     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8696                                         Recipe->getMask());
8697     VPIG->insertBefore(Recipe);
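    // Re-route each member's VPValue to the corresponding result of the
    // interleave recipe, then erase the now-redundant per-member recipes.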
8698     unsigned J = 0;
8699     for (unsigned i = 0; i < IG->getFactor(); ++i)
8700       if (Instruction *Member = IG->getMember(i)) {
8701         if (!Member->getType()->isVoidTy()) {
8702           VPValue *OriginalV = Plan->getVPValue(Member);
8703           Plan->removeVPValueFor(Member);
8704           Plan->addVPValue(Member, VPIG->getVPValue(J));
8705           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8706           J++;
8707         }
8708         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8709       }
8710   }
8711 
  // Adjust the recipes for any in-loop reductions.
8713   if (Range.Start.isVector())
8714     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8715 
8716   // Finally, if tail is folded by masking, introduce selects between the phi
8717   // and the live-out instruction of each reduction, at the end of the latch.
8718   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8719     Builder.setInsertPoint(VPBB);
8720     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8721     for (auto &Reduction : Legal->getReductionVars()) {
8722       if (CM.isInLoopReduction(Reduction.first))
8723         continue;
8724       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
8725       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
8726       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
8727     }
8728   }
8729 
8730   std::string PlanName;
8731   raw_string_ostream RSO(PlanName);
8732   ElementCount VF = Range.Start;
8733   Plan->addVF(VF);
8734   RSO << "Initial VPlan for VF={" << VF;
8735   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8736     Plan->addVF(VF);
8737     RSO << "," << VF;
8738   }
8739   RSO << "},UF>=1";
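  // The resulting name reads e.g. "Initial VPlan for VF={4,8},UF>=1".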
8740   RSO.flush();
8741   Plan->setName(PlanName);
8742 
8743   return Plan;
8744 }
8745 
8746 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8747   // Outer loop handling: They may require CFG and instruction level
8748   // transformations before even evaluating whether vectorization is profitable.
8749   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8750   // the vectorization pipeline.
8751   assert(!OrigLoop->isInnermost());
8752   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8753 
8754   // Create new empty VPlan
8755   auto Plan = std::make_unique<VPlan>();
8756 
8757   // Build hierarchical CFG
8758   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8759   HCFGBuilder.buildHierarchicalCFG();
8760 
8761   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8762        VF *= 2)
8763     Plan->addVF(VF);
8764 
8765   if (EnableVPlanPredication) {
8766     VPlanPredicator VPP(*Plan);
8767     VPP.predicate();
8768 
8769     // Avoid running transformation to recipes until masked code generation in
8770     // VPlan-native path is in place.
8771     return Plan;
8772   }
8773 
8774   SmallPtrSet<Instruction *, 1> DeadInstructions;
8775   VPlanTransforms::VPInstructionsToVPRecipes(
8776       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8777   return Plan;
8778 }
8779 
// Adjust the recipes for any in-loop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
8782 // reductions, with one operand being vector and the other being the scalar
8783 // reduction chain.
8784 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8785     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8786   for (auto &Reduction : CM.getInLoopReductionChains()) {
8787     PHINode *Phi = Reduction.first;
8788     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8789     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8790 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
8793     // which of the two operands will remain scalar and which will be reduced.
8794     // For minmax the chain will be the select instructions.
8795     Instruction *Chain = Phi;
8796     for (Instruction *R : ReductionOperations) {
8797       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8798       RecurKind Kind = RdxDesc.getRecurrenceKind();
8799 
8800       VPValue *ChainOp = Plan->getVPValue(Chain);
8801       unsigned FirstOpId;
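      // For a min/max reduction the recipe being replaced is the select; its
      // operand 0 is the compare, so the candidate reduction operands start at
      // index 1. For other reductions they start at index 0.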
8802       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8803         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8804                "Expected to replace a VPWidenSelectSC");
8805         FirstOpId = 1;
8806       } else {
8807         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8808                "Expected to replace a VPWidenSC");
8809         FirstOpId = 0;
8810       }
8811       unsigned VecOpId =
8812           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8813       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8814 
8815       auto *CondOp = CM.foldTailByMasking()
8816                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8817                          : nullptr;
8818       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8819           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8820       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8821       Plan->removeVPValueFor(R);
8822       Plan->addVPValue(R, RedRecipe);
8823       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8825       WidenRecipe->eraseFromParent();
8826 
8827       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8828         VPRecipeBase *CompareRecipe =
8829             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8830         assert(isa<VPWidenRecipe>(CompareRecipe) &&
8831                "Expected to replace a VPWidenSC");
8832         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
8833                "Expected no remaining users");
8834         CompareRecipe->eraseFromParent();
8835       }
8836       Chain = R;
8837     }
8838   }
8839 }
8840 
8841 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
8842                                VPSlotTracker &SlotTracker) const {
8843   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8844   IG->getInsertPos()->printAsOperand(O, false);
8845   O << ", ";
8846   getAddr()->printAsOperand(O, SlotTracker);
8847   VPValue *Mask = getMask();
8848   if (Mask) {
8849     O << ", ";
8850     Mask->printAsOperand(O, SlotTracker);
8851   }
8852   for (unsigned i = 0; i < IG->getFactor(); ++i)
8853     if (Instruction *I = IG->getMember(i))
8854       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
8855 }
8856 
8857 void VPWidenCallRecipe::execute(VPTransformState &State) {
8858   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
8859                                   *this, State);
8860 }
8861 
8862 void VPWidenSelectRecipe::execute(VPTransformState &State) {
8863   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
8864                                     this, *this, InvariantCond, State);
8865 }
8866 
8867 void VPWidenRecipe::execute(VPTransformState &State) {
8868   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
8869 }
8870 
8871 void VPWidenGEPRecipe::execute(VPTransformState &State) {
8872   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
8873                       *this, State.UF, State.VF, IsPtrLoopInvariant,
8874                       IsIndexLoopInvariant, State);
8875 }
8876 
8877 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
8878   assert(!State.Instance && "Int or FP induction being replicated.");
8879   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
8880                                    getTruncInst(), getVPValue(0),
8881                                    getCastValue(), State);
8882 }
8883 
8884 void VPWidenPHIRecipe::execute(VPTransformState &State) {
8885   Value *StartV =
8886       getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
8887   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
8888                                  StartV, this, State);
8889 }
8890 
8891 void VPBlendRecipe::execute(VPTransformState &State) {
8892   State.ILV->setDebugLocFromInst(State.Builder, Phi);
8893   // We know that all PHIs in non-header blocks are converted into
8894   // selects, so we don't have to worry about the insertion order and we
8895   // can just use the builder.
8896   // At this point we generate the predication tree. There may be
8897   // duplications since this is a simple recursive scan, but future
8898   // optimizations will clean it up.
8899 
8900   unsigned NumIncoming = getNumIncomingValues();
8901 
8902   // Generate a sequence of selects of the form:
8903   // SELECT(Mask3, In3,
8904   //        SELECT(Mask2, In2,
8905   //               SELECT(Mask1, In1,
8906   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // (and which are therefore essentially undef) are taken from In0.
8909   InnerLoopVectorizer::VectorParts Entry(State.UF);
8910   for (unsigned In = 0; In < NumIncoming; ++In) {
8911     for (unsigned Part = 0; Part < State.UF; ++Part) {
8912       // We might have single edge PHIs (blocks) - use an identity
8913       // 'select' for the first PHI operand.
8914       Value *In0 = State.get(getIncomingValue(In), Part);
8915       if (In == 0)
8916         Entry[Part] = In0; // Initialize with the first incoming value.
8917       else {
8918         // Select between the current value and the previous incoming edge
8919         // based on the incoming mask.
8920         Value *Cond = State.get(getMask(In), Part);
8921         Entry[Part] =
8922             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
8923       }
8924     }
8925   }
8926   for (unsigned Part = 0; Part < State.UF; ++Part)
8927     State.set(this, Entry[Part], Part);
8928 }
8929 
8930 void VPInterleaveRecipe::execute(VPTransformState &State) {
8931   assert(!State.Instance && "Interleave group being replicated.");
8932   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
8933                                       getStoredValues(), getMask());
8934 }
8935 
8936 void VPReductionRecipe::execute(VPTransformState &State) {
8937   assert(!State.Instance && "Reduction being replicated.");
8938   for (unsigned Part = 0; Part < State.UF; ++Part) {
8939     RecurKind Kind = RdxDesc->getRecurrenceKind();
8940     Value *NewVecOp = State.get(getVecOp(), Part);
8941     if (VPValue *Cond = getCondOp()) {
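      // The reduction is conditional (e.g. under tail folding): replace
      // masked-out lanes with the reduction's identity value so they do not
      // affect the reduced result.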
8942       Value *NewCond = State.get(Cond, Part);
8943       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
8944       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
8945           Kind, VecTy->getElementType());
8946       Constant *IdenVec =
8947           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
8948       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
8949       NewVecOp = Select;
8950     }
8951     Value *NewRed =
8952         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
8953     Value *PrevInChain = State.get(getChainOp(), Part);
8954     Value *NextInChain;
8955     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8956       NextInChain =
8957           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
8958                          NewRed, PrevInChain);
8959     } else {
8960       NextInChain = State.Builder.CreateBinOp(
8961           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
8962           PrevInChain);
8963     }
8964     State.set(this, NextInChain, Part);
8965   }
8966 }
8967 
8968 void VPReplicateRecipe::execute(VPTransformState &State) {
8969   if (State.Instance) { // Generate a single instance.
8970     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
8971     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
8972                                     *State.Instance, IsPredicated, State);
8973     // Insert scalar instance packing it into a vector.
8974     if (AlsoPack && State.VF.isVector()) {
8975       // If we're constructing lane 0, initialize to start from poison.
8976       if (State.Instance->Lane == 0) {
8977         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
8978         Value *Poison = PoisonValue::get(
8979             VectorType::get(getUnderlyingValue()->getType(), State.VF));
8980         State.set(this, Poison, State.Instance->Part);
8981       }
8982       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
8983     }
8984     return;
8985   }
8986 
8987   // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for each
8989   // of the UF parts.
8990   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
8991   assert((!State.VF.isScalable() || IsUniform) &&
8992          "Can't scalarize a scalable vector");
8993   for (unsigned Part = 0; Part < State.UF; ++Part)
8994     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8995       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
8996                                       VPIteration(Part, Lane), IsPredicated,
8997                                       State);
8998 }
8999 
9000 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9001   assert(State.Instance && "Branch on Mask works only on single instance.");
9002 
9003   unsigned Part = State.Instance->Part;
9004   unsigned Lane = State.Instance->Lane;
9005 
9006   Value *ConditionBit = nullptr;
9007   VPValue *BlockInMask = getMask();
9008   if (BlockInMask) {
9009     ConditionBit = State.get(BlockInMask, Part);
9010     if (ConditionBit->getType()->isVectorTy())
9011       ConditionBit = State.Builder.CreateExtractElement(
9012           ConditionBit, State.Builder.getInt32(Lane));
9013   } else // Block in mask is all-one.
9014     ConditionBit = State.Builder.getTrue();
9015 
9016   // Replace the temporary unreachable terminator with a new conditional branch,
9017   // whose two destinations will be set later when they are created.
9018   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9019   assert(isa<UnreachableInst>(CurrentTerminator) &&
9020          "Expected to replace unreachable terminator with conditional branch.");
9021   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9022   CondBr->setSuccessor(0, nullptr);
9023   ReplaceInstWithInst(CurrentTerminator, CondBr);
9024 }
9025 
9026 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9027   assert(State.Instance && "Predicated instruction PHI works per instance.");
9028   Instruction *ScalarPredInst =
9029       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9030   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9031   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9032   assert(PredicatingBB && "Predicated block has no single predecessor.");
9033   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9034          "operand must be VPReplicateRecipe");
9035 
9036   // By current pack/unpack logic we need to generate only a single phi node: if
9037   // a vector value for the predicated instruction exists at this point it means
9038   // the instruction has vector users only, and a phi for the vector value is
9039   // needed. In this case the recipe of the predicated instruction is marked to
9040   // also do that packing, thereby "hoisting" the insert-element sequence.
9041   // Otherwise, a phi node for the scalar value is needed.
9042   unsigned Part = State.Instance->Part;
9043   if (State.hasVectorValue(getOperand(0), Part)) {
9044     Value *VectorValue = State.get(getOperand(0), Part);
9045     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9046     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9047     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9048     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9049     if (State.hasVectorValue(this, Part))
9050       State.reset(this, VPhi, Part);
9051     else
9052       State.set(this, VPhi, Part);
9053     // NOTE: Currently we need to update the value of the operand, so the next
9054     // predicated iteration inserts its generated value in the correct vector.
9055     State.reset(getOperand(0), VPhi, Part);
9056   } else {
9057     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9058     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9059     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9060                      PredicatingBB);
9061     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9062     if (State.hasScalarValue(this, *State.Instance))
9063       State.reset(this, Phi, *State.Instance);
9064     else
9065       State.set(this, Phi, *State.Instance);
9066     // NOTE: Currently we need to update the value of the operand, so the next
9067     // predicated iteration inserts its generated value in the correct vector.
9068     State.reset(getOperand(0), Phi, *State.Instance);
9069   }
9070 }
9071 
9072 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9073   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9074   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9075                                         StoredValue ? nullptr : getVPValue(),
9076                                         getAddr(), StoredValue, getMask());
9077 }
9078 
9079 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9080 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9081 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9082 // for predication.
9083 static ScalarEpilogueLowering getScalarEpilogueLowering(
9084     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9085     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9086     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9087     LoopVectorizationLegality &LVL) {
9088   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9089   // don't look at hints or options, and don't request a scalar epilogue.
9090   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9091   // LoopAccessInfo (due to code dependency and not being able to reliably get
9092   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9093   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9094   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9095   // back to the old way and vectorize with versioning when forced. See D81345.)
9096   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9097                                                       PGSOQueryType::IRPass) &&
9098                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9099     return CM_ScalarEpilogueNotAllowedOptSize;
9100 
9101   // 2) If set, obey the directives
9102   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9103     switch (PreferPredicateOverEpilogue) {
9104     case PreferPredicateTy::ScalarEpilogue:
9105       return CM_ScalarEpilogueAllowed;
9106     case PreferPredicateTy::PredicateElseScalarEpilogue:
9107       return CM_ScalarEpilogueNotNeededUsePredicate;
9108     case PreferPredicateTy::PredicateOrDontVectorize:
9109       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
9111   }
9112 
9113   // 3) If set, obey the hints
9114   switch (Hints.getPredicate()) {
9115   case LoopVectorizeHints::FK_Enabled:
9116     return CM_ScalarEpilogueNotNeededUsePredicate;
9117   case LoopVectorizeHints::FK_Disabled:
9118     return CM_ScalarEpilogueAllowed;
  }
9120 
9121   // 4) if the TTI hook indicates this is profitable, request predication.
9122   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9123                                        LVL.getLAI()))
9124     return CM_ScalarEpilogueNotNeededUsePredicate;
9125 
9126   return CM_ScalarEpilogueAllowed;
9127 }
9128 
9129 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9131   if (hasVectorValue(Def, Part))
9132     return Data.PerPartOutput[Def][Part];
9133 
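  // If no value has been generated for Def at all, it must be a live-in value
  // defined outside the vectorized loop; broadcast it across the vector lanes.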
9134   if (!hasScalarValue(Def, {Part, 0})) {
9135     Value *IRV = Def->getLiveInIRValue();
9136     Value *B = ILV->getBroadcastInstrs(IRV);
9137     set(Def, B, Part);
9138     return B;
9139   }
9140 
9141   Value *ScalarValue = get(Def, {Part, 0});
9142   // If we aren't vectorizing, we can just copy the scalar map values over
9143   // to the vector map.
9144   if (VF.isScalar()) {
9145     set(Def, ScalarValue, Part);
9146     return ScalarValue;
9147   }
9148 
9149   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9150   bool IsUniform = RepR && RepR->isUniform();
9151 
9152   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9153   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9154 
9155   // Set the insert point after the last scalarized instruction. This
9156   // ensures the insertelement sequence will directly follow the scalar
9157   // definitions.
9158   auto OldIP = Builder.saveIP();
9159   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9160   Builder.SetInsertPoint(&*NewIP);
9161 
9162   // However, if we are vectorizing, we need to construct the vector values.
9163   // If the value is known to be uniform after vectorization, we can just
9164   // broadcast the scalar value corresponding to lane zero for each unroll
9165   // iteration. Otherwise, we construct the vector values using
9166   // insertelement instructions. Since the resulting vectors are stored in
9167   // State, we will only generate the insertelements once.
9168   Value *VectorValue = nullptr;
9169   if (IsUniform) {
9170     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9171     set(Def, VectorValue, Part);
9172   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9177     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9178       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9179     VectorValue = get(Def, Part);
9180   }
9181   Builder.restoreIP(OldIP);
9182   return VectorValue;
9183 }
9184 
9185 // Process the loop in the VPlan-native vectorization path. This path builds
9186 // VPlan upfront in the vectorization pipeline, which allows to apply
9187 // VPlan-to-VPlan transformations from the very beginning without modifying the
9188 // input LLVM IR.
9189 static bool processLoopInVPlanNativePath(
9190     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9191     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9192     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9193     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9194     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9195 
9196   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9197     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9198     return false;
9199   }
9200   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9201   Function *F = L->getHeader()->getParent();
9202   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9203 
9204   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9205       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9206 
9207   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9208                                 &Hints, IAI);
9209   // Use the planner for outer loop vectorization.
9210   // TODO: CM is not used at this point inside the planner. Turn CM into an
9211   // optional argument if we don't need it in the future.
9212   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9213 
9214   // Get user vectorization factor.
9215   ElementCount UserVF = Hints.getWidth();
9216 
9217   // Plan how to best vectorize, return the best VF and its cost.
9218   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9219 
9220   // If we are stress testing VPlan builds, do not attempt to generate vector
9221   // code. Masked vector code generation support will follow soon.
9222   // Also, do not attempt to vectorize if no vector code will be produced.
9223   if (VPlanBuildStressTest || EnableVPlanPredication ||
9224       VectorizationFactor::Disabled() == VF)
9225     return false;
9226 
9227   LVP.setBestPlan(VF.Width, 1);
9228 
9229   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9230                          &CM, BFI, PSI);
9231   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9232                     << L->getHeader()->getParent()->getName() << "\"\n");
9233   LVP.executePlan(LB, DT);
9234 
9235   // Mark the loop as already vectorized to avoid vectorizing again.
9236   Hints.setAlreadyVectorized();
9237 
9238   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9239   return true;
9240 }
9241 
9242 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision there
9244 // will be a performance penalty from the conversion overhead and the change in
9245 // the vector width.
9246 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9247   SmallVector<Instruction *, 4> Worklist;
9248   for (BasicBlock *BB : L->getBlocks()) {
9249     for (Instruction &Inst : *BB) {
9250       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9251         if (S->getValueOperand()->getType()->isFloatTy())
9252           Worklist.push_back(S);
9253       }
9254     }
9255   }
9256 
  // Traverse the floating point stores upwards, searching for floating point
9258   // conversions.
9259   SmallPtrSet<const Instruction *, 4> Visited;
9260   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9261   while (!Worklist.empty()) {
9262     auto *I = Worklist.pop_back_val();
9263     if (!L->contains(I))
9264       continue;
9265     if (!Visited.insert(I).second)
9266       continue;
9267 
9268     // Emit a remark if the floating point store required a floating
9269     // point conversion.
9270     // TODO: More work could be done to identify the root cause such as a
9271     // constant or a function return type and point the user to it.
9272     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9273       ORE->emit([&]() {
9274         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9275                                           I->getDebugLoc(), L->getHeader())
9276                << "floating point conversion changes vector width. "
9277                << "Mixed floating point precision requires an up/down "
9278                << "cast that will negatively impact performance.";
9279       });
9280 
9281     for (Use &Op : I->operands())
9282       if (auto *OpI = dyn_cast<Instruction>(Op))
9283         Worklist.push_back(OpI);
9284   }
9285 }
9286 
9287 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9288     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9289                                !EnableLoopInterleaving),
9290       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9291                               !EnableLoopVectorization) {}
9292 
9293 bool LoopVectorizePass::processLoop(Loop *L) {
9294   assert((EnableVPlanNativePath || L->isInnermost()) &&
9295          "VPlan-native path is not enabled. Only process inner loops.");
9296 
9297 #ifndef NDEBUG
9298   const std::string DebugLocStr = getDebugLocString(L);
9299 #endif /* NDEBUG */
9300 
9301   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9302                     << L->getHeader()->getParent()->getName() << "\" from "
9303                     << DebugLocStr << "\n");
9304 
9305   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9306 
9307   LLVM_DEBUG(
9308       dbgs() << "LV: Loop hints:"
9309              << " force="
9310              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9311                      ? "disabled"
9312                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9313                             ? "enabled"
9314                             : "?"))
9315              << " width=" << Hints.getWidth()
9316              << " unroll=" << Hints.getInterleave() << "\n");
9317 
9318   // Function containing loop
9319   Function *F = L->getHeader()->getParent();
9320 
9321   // Looking at the diagnostic output is the only way to determine if a loop
9322   // was vectorized (other than looking at the IR or machine code), so it
9323   // is important to generate an optimization remark for each loop. Most of
9324   // these messages are generated as OptimizationRemarkAnalysis. Remarks
9325   // generated as OptimizationRemark and OptimizationRemarkMissed are
9326   // less verbose reporting vectorized loops and unvectorized loops that may
9327   // benefit from vectorization, respectively.
9328 
9329   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9330     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9331     return false;
9332   }
9333 
9334   PredicatedScalarEvolution PSE(*SE, *L);
9335 
9336   // Check if it is legal to vectorize the loop.
9337   LoopVectorizationRequirements Requirements(*ORE);
9338   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9339                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9340   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9341     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9342     Hints.emitRemarkWithHints();
9343     return false;
9344   }
9345 
9346   // Check the function attributes and profiles to find out if this function
9347   // should be optimized for size.
9348   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9349       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9350 
9351   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9352   // here. They may require CFG and instruction level transformations before
9353   // even evaluating whether vectorization is profitable. Since we cannot modify
9354   // the incoming IR, we need to build VPlan upfront in the vectorization
9355   // pipeline.
9356   if (!L->isInnermost())
9357     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9358                                         ORE, BFI, PSI, Hints);
9359 
9360   assert(L->isInnermost() && "Inner loop expected.");
9361 
9362   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9363   // count by optimizing for size, to minimize overheads.
9364   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9365   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9366     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9367                       << "This loop is worth vectorizing only if no scalar "
9368                       << "iteration overheads are incurred.");
9369     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9370       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9371     else {
9372       LLVM_DEBUG(dbgs() << "\n");
9373       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9374     }
9375   }
9376 
9377   // Check the function attributes to see if implicit floats are allowed.
9378   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9379   // an integer loop and the vector instructions selected are purely integer
9380   // vector instructions?
9381   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9382     reportVectorizationFailure(
9383         "Can't vectorize when the NoImplicitFloat attribute is used",
9384         "loop not vectorized due to NoImplicitFloat attribute",
9385         "NoImplicitFloat", ORE, L);
9386     Hints.emitRemarkWithHints();
9387     return false;
9388   }
9389 
9390   // Check if the target supports potentially unsafe FP vectorization.
9391   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9392   // for the target we're vectorizing for, to make sure none of the
9393   // additional fp-math flags can help.
9394   if (Hints.isPotentiallyUnsafe() &&
9395       TTI->isFPVectorizationPotentiallyUnsafe()) {
9396     reportVectorizationFailure(
9397         "Potentially unsafe FP op prevents vectorization",
9398         "loop not vectorized due to unsafe FP support.",
9399         "UnsafeFP", ORE, L);
9400     Hints.emitRemarkWithHints();
9401     return false;
9402   }
9403 
9404   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9405   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9406 
9407   // If an override option has been passed in for interleaved accesses, use it.
9408   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9409     UseInterleaved = EnableInterleavedMemAccesses;
9410 
9411   // Analyze interleaved memory accesses.
9412   if (UseInterleaved) {
9413     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9414   }
9415 
9416   // Use the cost model.
9417   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9418                                 F, &Hints, IAI);
9419   CM.collectValuesToIgnore();
9420 
9421   // Use the planner for vectorization.
9422   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9423 
9424   // Get user vectorization factor and interleave count.
9425   ElementCount UserVF = Hints.getWidth();
9426   unsigned UserIC = Hints.getInterleave();
9427 
9428   // Plan how to best vectorize, return the best VF and its cost.
9429   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9430 
9431   VectorizationFactor VF = VectorizationFactor::Disabled();
9432   unsigned IC = 1;
9433 
9434   if (MaybeVF) {
9435     VF = *MaybeVF;
9436     // Select the interleave count.
9437     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9438   }
9439 
9440   // Identify the diagnostic messages that should be produced.
9441   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9442   bool VectorizeLoop = true, InterleaveLoop = true;
9443   if (Requirements.doesNotMeet(F, L, Hints)) {
9444     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
9445                          "requirements.\n");
9446     Hints.emitRemarkWithHints();
9447     return false;
9448   }
9449 
9450   if (VF.Width.isScalar()) {
9451     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9452     VecDiagMsg = std::make_pair(
9453         "VectorizationNotBeneficial",
9454         "the cost-model indicates that vectorization is not beneficial");
9455     VectorizeLoop = false;
9456   }
9457 
9458   if (!MaybeVF && UserIC > 1) {
9459     // Tell the user interleaving was avoided up-front, despite being explicitly
9460     // requested.
9461     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9462                          "interleaving should be avoided up front\n");
9463     IntDiagMsg = std::make_pair(
9464         "InterleavingAvoided",
9465         "Ignoring UserIC, because interleaving was avoided up front");
9466     InterleaveLoop = false;
9467   } else if (IC == 1 && UserIC <= 1) {
9468     // Tell the user interleaving is not beneficial.
9469     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9470     IntDiagMsg = std::make_pair(
9471         "InterleavingNotBeneficial",
9472         "the cost-model indicates that interleaving is not beneficial");
9473     InterleaveLoop = false;
9474     if (UserIC == 1) {
9475       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9476       IntDiagMsg.second +=
9477           " and is explicitly disabled or interleave count is set to 1";
9478     }
9479   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
9481     LLVM_DEBUG(
9482         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9483     IntDiagMsg = std::make_pair(
9484         "InterleavingBeneficialButDisabled",
9485         "the cost-model indicates that interleaving is beneficial "
9486         "but is explicitly disabled or interleave count is set to 1");
9487     InterleaveLoop = false;
9488   }
9489 
9490   // Override IC if user provided an interleave count.
9491   IC = UserIC > 0 ? UserIC : IC;
9492 
9493   // Emit diagnostic messages, if any.
9494   const char *VAPassName = Hints.vectorizeAnalysisPassName();
9495   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
9497     ORE->emit([&]() {
9498       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9499                                       L->getStartLoc(), L->getHeader())
9500              << VecDiagMsg.second;
9501     });
9502     ORE->emit([&]() {
9503       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9504                                       L->getStartLoc(), L->getHeader())
9505              << IntDiagMsg.second;
9506     });
9507     return false;
9508   } else if (!VectorizeLoop && InterleaveLoop) {
9509     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9510     ORE->emit([&]() {
9511       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9512                                         L->getStartLoc(), L->getHeader())
9513              << VecDiagMsg.second;
9514     });
9515   } else if (VectorizeLoop && !InterleaveLoop) {
9516     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9517                       << ") in " << DebugLocStr << '\n');
9518     ORE->emit([&]() {
9519       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9520                                         L->getStartLoc(), L->getHeader())
9521              << IntDiagMsg.second;
9522     });
9523   } else if (VectorizeLoop && InterleaveLoop) {
9524     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9525                       << ") in " << DebugLocStr << '\n');
9526     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9527   }
9528 
9529   LVP.setBestPlan(VF.Width, IC);
9530 
9531   using namespace ore;
9532   bool DisableRuntimeUnroll = false;
9533   MDNode *OrigLoopID = L->getLoopID();
9534 
9535   if (!VectorizeLoop) {
9536     assert(IC > 1 && "interleave count should not be 1 or 0");
9537     // If we decided that it is not legal to vectorize the loop, then
9538     // interleave it.
9539     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
9540                                BFI, PSI);
9541     LVP.executePlan(Unroller, DT);
9542 
9543     ORE->emit([&]() {
9544       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9545                                 L->getHeader())
9546              << "interleaved loop (interleaved count: "
9547              << NV("InterleaveCount", IC) << ")";
9548     });
9549   } else {
9550     // If we decided that it is *legal* to vectorize the loop, then do it.
9551 
9552     // Consider vectorizing the epilogue too if it's profitable.
9553     VectorizationFactor EpilogueVF =
9554       CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
9555     if (EpilogueVF.Width.isVector()) {
9556 
9557       // The first pass vectorizes the main loop and creates a scalar epilogue
9558       // to be vectorized by executing the plan (potentially with a different
9559       // factor) again shortly afterwards.
9560       EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
9561                                         EpilogueVF.Width.getKnownMinValue(), 1);
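      // EPI carries both the main-loop factors (VF.Width, IC) and the epilogue
      // factors (EpilogueVF.Width, 1); the two executePlan calls below consume
      // them in turn.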
9562       EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
9563                                          &LVL, &CM, BFI, PSI);
9564 
9565       LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
9566       LVP.executePlan(MainILV, DT);
9567       ++LoopsVectorized;
9568 
9569       simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9570       formLCSSARecursively(*L, *DT, LI, SE);
9571 
9572       // Second pass vectorizes the epilogue and adjusts the control flow
9573       // edges from the first pass.
9574       LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
9575       EPI.MainLoopVF = EPI.EpilogueVF;
9576       EPI.MainLoopUF = EPI.EpilogueUF;
9577       EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
9578                                                ORE, EPI, &LVL, &CM, BFI, PSI);
9579       LVP.executePlan(EpilogILV, DT);
9580       ++LoopsEpilogueVectorized;
9581 
9582       if (!MainILV.areSafetyChecksAdded())
9583         DisableRuntimeUnroll = true;
9584     } else {
9585       InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
9586                              &LVL, &CM, BFI, PSI);
9587       LVP.executePlan(LB, DT);
9588       ++LoopsVectorized;
9589 
9590       // Add metadata to disable runtime unrolling a scalar loop when there are
9591       // no runtime checks about strides and memory. A scalar loop that is
9592       // rarely used is not worth unrolling.
9593       if (!LB.areSafetyChecksAdded())
9594         DisableRuntimeUnroll = true;
9595     }
9596 
9597     // Report the vectorization decision.
9598     ORE->emit([&]() {
9599       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
9600                                 L->getHeader())
9601              << "vectorized loop (vectorization width: "
9602              << NV("VectorizationFactor", VF.Width)
9603              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
9604     });
9605 
9606     if (ORE->allowExtraAnalysis(LV_NAME))
9607       checkMixedPrecision(L, ORE);
9608   }
9609 
9610   Optional<MDNode *> RemainderLoopID =
9611       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
9612                                       LLVMLoopVectorizeFollowupEpilogue});
9613   if (RemainderLoopID.hasValue()) {
9614     L->setLoopID(RemainderLoopID.getValue());
9615   } else {
9616     if (DisableRuntimeUnroll)
9617       AddRuntimeUnrollDisableMetaData(L);
9618 
9619     // Mark the loop as already vectorized to avoid vectorizing again.
9620     Hints.setAlreadyVectorized();
9621   }
9622 
9623   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9624   return true;
9625 }
9626 
9627 LoopVectorizeResult LoopVectorizePass::runImpl(
9628     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
9629     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
9630     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
9631     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
9632     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
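  // Stash the analysis results in members so the per-loop processing below
  // (processLoop and its helpers) can use them.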
9633   SE = &SE_;
9634   LI = &LI_;
9635   TTI = &TTI_;
9636   DT = &DT_;
9637   BFI = &BFI_;
9638   TLI = TLI_;
9639   AA = &AA_;
9640   AC = &AC_;
9641   GetLAA = &GetLAA_;
9642   DB = &DB_;
9643   ORE = &ORE_;
9644   PSI = PSI_;
9645 
9646   // Don't attempt if
9647   // 1. the target claims to have no vector registers, and
9648   // 2. interleaving won't help ILP.
9649   //
9650   // The second condition is necessary because, even if the target has no
9651   // vector registers, loop vectorization may still enable scalar
9652   // interleaving.
9653   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9654       TTI->getMaxInterleaveFactor(1) < 2)
9655     return LoopVectorizeResult(false, false);
9656 
9657   bool Changed = false, CFGChanged = false;
9658 
9659   // The vectorizer requires loops to be in simplified form.
9660   // Since simplification may add new inner loops, it has to run before the
9661   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
9663   // vectorized.
9664   for (auto &L : *LI)
9665     Changed |= CFGChanged |=
9666         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9667 
9668   // Build up a worklist of inner-loops to vectorize. This is necessary as
9669   // the act of vectorizing or partially unrolling a loop creates new loops
9670   // and can invalidate iterators across the loops.
9671   SmallVector<Loop *, 8> Worklist;
9672 
9673   for (Loop *L : *LI)
9674     collectSupportedLoops(*L, LI, ORE, Worklist);
9675 
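  // Account for all candidate loops in the statistics.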
9676   LoopsAnalyzed += Worklist.size();
9677 
9678   // Now walk the identified inner loops.
9679   while (!Worklist.empty()) {
9680     Loop *L = Worklist.pop_back_val();
9681 
9682     // For the inner loops we actually process, form LCSSA to simplify the
9683     // transform.
9684     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9685 
9686     Changed |= CFGChanged |= processLoop(L);
9687   }
9688 
  // Report whether any loop was modified and whether the CFG changed.
9690   return LoopVectorizeResult(Changed, CFGChanged);
9691 }
9692 
9693 PreservedAnalyses LoopVectorizePass::run(Function &F,
9694                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
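  // Request MemorySSA only when MemorySSA-based loop dependency analysis is
  // enabled; it is forwarded via the loop standard analysis results below.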
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;
9708 
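  // LoopAccessAnalysis is a loop analysis, so wrap it in a callback that
  // lazily queries the inner loop analysis manager for each candidate loop.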
  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve the LoopInfo/DominatorTree analyses with
  // outer loop vectorization. Until this is addressed, mark these analyses
  // as preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
9738 }
9739