//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
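//
// For example (a conceptual sketch rather than the exact IR produced), a
// scalar loop such as
//
//   for (i = 0; i < n; i += 1)
//     A[i] = B[i] + 42;
//
// is rewritten for a vectorization factor of 4 into a wide loop plus a scalar
// remainder that handles the last few iterations:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>;  // one 'wide' iteration
//   for (; i < n; i += 1)
//     A[i] = B[i] + 42;                        // scalar epilogue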
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));
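
// An illustrative invocation (for exposition and testing only, not a
// recommended configuration): passing "-epilogue-vectorization-force-VF=4" to
// a tool that honours these cl::opt flags, e.g. via
// "opt -passes=loop-vectorize", forces VF=4 for all applicable epilogue loops.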

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and predication is preferred; the enum below lists the options.
// I.e., the vectorizer will try to fold the tail-loop (epilogue) into the
// vector body and predicate the instructions accordingly. If tail-folding
// fails, the fallback strategy depends on the chosen value:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
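
// An illustrative invocation (purely for exposition):
//   opt -passes=loop-vectorize \
//       -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue ...
// asks the vectorizer to try tail-folding first and to fall back to a scalar
// epilogue only if tail-folding is not possible.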

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}
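
// For example (illustrative IR), given
//   %v = load i32, i32* %p
//   store double %d, double* %q
// getMemInstValueType returns i32 for the load and double for the store,
// i.e. the type of the value being transferred rather than the pointer type.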

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
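
// As a concrete example of the VF = 1 case (assuming a typical data layout):
// an i1 has a type size of 1 bit but an alloc size of 8 bits, so an array of
// i1 values needs padding between elements and the type is reported as
// irregular, whereas an i32, whose alloc size and type size are both 32 bits,
// is regular.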

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }
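
// For instance, with the value of 2 returned above, a predicated block whose
// instructions cost C is assumed to contribute roughly C / 2 to the cost of a
// single loop iteration, because the block is expected to execute on every
// other iteration.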

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           Value *StartV, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  void setScalarValue(Value *Scalar, const VPIteration &Instance, Value *V) {
    VectorLoopValueMap.setScalarValue(Scalar, Instance, V);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (eg. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
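
// A minimal usage sketch (the concrete numbers are only illustrative): to
// describe a main loop vectorized with VF=16, UF=2 and an epilogue vectorized
// with VF=8, UF=1, the planner could construct
//   EpilogueLoopVectorizationInfo EPI(16, 2, 8, 1);
// and hand the same EPI object to both skeleton-creation stages below, so the
// trip counts and safety-check blocks are shared between the two passes.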

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE,
                    EpilogueLoopVectorizationInfo &EPI,
                    LoopVectorizationLegality *LVL,
                    llvm::LoopVectorizationCostModel *CM,
                    BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    }
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
    Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
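
// For example, for Step = 2 and a fixed VF of 4, createStepForVF returns the
// constant 8; for a scalable VF whose known minimum is 4 it returns 8 * vscale
// (materialized via CreateVScale), i.e. the number of lanes advanced per part.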
1149 
1150 namespace llvm {
1151 
1152 void reportVectorizationFailure(const StringRef DebugMsg,
1153     const StringRef OREMsg, const StringRef ORETag,
1154     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1155   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1156   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1157   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1158                 ORETag, TheLoop, I) << OREMsg);
1159 }
1160 
1161 } // end namespace llvm
1162 
1163 #ifndef NDEBUG
1164 /// \return string containing a file name and a line # for the given loop.
1165 static std::string getDebugLocString(const Loop *L) {
1166   std::string Result;
1167   if (L) {
1168     raw_string_ostream OS(Result);
1169     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1170       LoopDbgLoc.print(OS);
1171     else
1172       // Just print the module name.
1173       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1174     OS.flush();
1175   }
1176   return Result;
1177 }
1178 #endif
1179 
1180 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1181                                          const Instruction *Orig) {
1182   // If the loop was versioned with memchecks, add the corresponding no-alias
1183   // metadata.
1184   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1185     LVer->annotateInstWithNoAlias(To, Orig);
1186 }
1187 
1188 void InnerLoopVectorizer::addMetadata(Instruction *To,
1189                                       Instruction *From) {
1190   propagateMetadata(To, From);
1191   addNewMetadata(To, From);
1192 }
1193 
1194 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1195                                       Instruction *From) {
1196   for (Value *V : To) {
1197     if (Instruction *I = dyn_cast<Instruction>(V))
1198       addMetadata(I, From);
1199   }
1200 }
1201 
1202 namespace llvm {
1203 
1204 // Loop vectorization cost-model hints that indicate how the scalar epilogue
1205 // loop should be lowered.
1206 enum ScalarEpilogueLowering {
1207 
1208   // The default: allowing scalar epilogues.
1209   CM_ScalarEpilogueAllowed,
1210 
1211   // Vectorization with OptForSize: don't allow epilogues.
1212   CM_ScalarEpilogueNotAllowedOptSize,
1213 
1214   // A special case of vectorization with OptForSize: loops with a very small
1215   // trip count are considered for vectorization under OptForSize, thereby
1216   // making sure the cost of their loop body is dominant, free of runtime
1217   // guards and scalar iteration overheads.
1218   CM_ScalarEpilogueNotAllowedLowTripLoop,
1219 
1220   // Loop hint predicate indicating an epilogue is undesired.
1221   CM_ScalarEpilogueNotNeededUsePredicate,
1222 
1223   // Directive indicating we must either tail fold or not vectorize
1224   CM_ScalarEpilogueNotAllowedUsePredicate
1225 };
1226 
1227 /// LoopVectorizationCostModel - estimates the expected speedups due to
1228 /// vectorization.
1229 /// In many cases vectorization is not profitable. This can happen for a
1230 /// number of reasons. In this class we mainly attempt to predict the
1231 /// expected speedup/slowdowns due to the supported instruction set. We use the
1232 /// TargetTransformInfo to query the different backends for the cost of
1233 /// different operations.
1234 class LoopVectorizationCostModel {
1235 public:
1236   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1237                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1238                              LoopVectorizationLegality *Legal,
1239                              const TargetTransformInfo &TTI,
1240                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1241                              AssumptionCache *AC,
1242                              OptimizationRemarkEmitter *ORE, const Function *F,
1243                              const LoopVectorizeHints *Hints,
1244                              InterleavedAccessInfo &IAI)
1245       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1246         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1247         Hints(Hints), InterleaveInfo(IAI) {}
1248 
1249   /// \return An upper bound for the vectorization factor, or None if
1250   /// vectorization and interleaving should be avoided up front.
1251   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1252 
1253   /// \return True if runtime checks are required for vectorization, and false
1254   /// otherwise.
1255   bool runtimeChecksRequired();
1256 
1257   /// \return The most profitable vectorization factor and the cost of that VF.
1258   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1259   /// then this vectorization factor will be selected if vectorization is
1260   /// possible.
1261   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1262   VectorizationFactor
1263   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1264                                     const LoopVectorizationPlanner &LVP);
1265 
1266   /// Setup cost-based decisions for user vectorization factor.
1267   void selectUserVectorizationFactor(ElementCount UserVF) {
1268     collectUniformsAndScalars(UserVF);
1269     collectInstsToScalarize(UserVF);
1270   }
1271 
1272   /// \return The size (in bits) of the smallest and widest types in the code
1273   /// that needs to be vectorized. We ignore values that remain scalar such as
1274   /// 64 bit loop indices.
1275   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1276 
1277   /// \return The desired interleave count.
1278   /// If interleave count has been specified by metadata it will be returned.
1279   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1280   /// are the selected vectorization factor and the cost of the selected VF.
1281   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1282 
1283   /// A memory access instruction may be vectorized in more than one way.
1284   /// The form of the instruction after vectorization depends on its cost.
1285   /// This function takes cost-based decisions for Load/Store instructions
1286   /// and collects them in a map. This decision map is used for building
1287   /// the lists of loop-uniform and loop-scalar instructions.
1288   /// The calculated cost is saved with the widening decision in order to
1289   /// avoid redundant calculations.
1290   void setCostBasedWideningDecision(ElementCount VF);
1291 
1292   /// A struct that represents some properties of the register usage
1293   /// of a loop.
1294   struct RegisterUsage {
1295     /// Holds the number of loop invariant values that are used in the loop.
1296     /// The key is ClassID of target-provided register class.
1297     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1298     /// Holds the maximum number of concurrent live intervals in the loop.
1299     /// The key is ClassID of target-provided register class.
1300     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1301   };
1302 
1303   /// \return Returns information about the register usages of the loop for the
1304   /// given vectorization factors.
1305   SmallVector<RegisterUsage, 8>
1306   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1307 
1308   /// Collect values we want to ignore in the cost model.
1309   void collectValuesToIgnore();
1310 
1311   /// Split reductions into those that happen in the loop, and those that
1312   /// happen outside. In-loop ones are collected into InLoopReductionChains.
1313   void collectInLoopReductions();
1314 
1315   /// \returns The smallest bitwidth each instruction can be represented with.
1316   /// The vector equivalents of these instructions should be truncated to this
1317   /// type.
1318   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1319     return MinBWs;
1320   }
1321 
1322   /// \returns True if it is more profitable to scalarize instruction \p I for
1323   /// vectorization factor \p VF.
1324   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1325     assert(VF.isVector() &&
1326            "Profitable to scalarize relevant only for VF > 1.");
1327 
1328     // Cost model is not run in the VPlan-native path - return conservative
1329     // result until this changes.
1330     if (EnableVPlanNativePath)
1331       return false;
1332 
1333     auto Scalars = InstsToScalarize.find(VF);
1334     assert(Scalars != InstsToScalarize.end() &&
1335            "VF not yet analyzed for scalarization profitability");
1336     return Scalars->second.find(I) != Scalars->second.end();
1337   }
1338 
1339   /// Returns true if \p I is known to be uniform after vectorization.
1340   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1341     if (VF.isScalar())
1342       return true;
1343 
1344     // Cost model is not run in the VPlan-native path - return conservative
1345     // result until this changes.
1346     if (EnableVPlanNativePath)
1347       return false;
1348 
1349     auto UniformsPerVF = Uniforms.find(VF);
1350     assert(UniformsPerVF != Uniforms.end() &&
1351            "VF not yet analyzed for uniformity");
1352     return UniformsPerVF->second.count(I);
1353   }
1354 
1355   /// Returns true if \p I is known to be scalar after vectorization.
1356   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1357     if (VF.isScalar())
1358       return true;
1359 
1360     // Cost model is not run in the VPlan-native path - return conservative
1361     // result until this changes.
1362     if (EnableVPlanNativePath)
1363       return false;
1364 
1365     auto ScalarsPerVF = Scalars.find(VF);
1366     assert(ScalarsPerVF != Scalars.end() &&
1367            "Scalar values are not calculated for VF");
1368     return ScalarsPerVF->second.count(I);
1369   }
1370 
1371   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1372   /// for vectorization factor \p VF.
1373   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1374     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1375            !isProfitableToScalarize(I, VF) &&
1376            !isScalarAfterVectorization(I, VF);
1377   }
1378 
1379   /// Decision that was taken during cost calculation for memory instruction.
1380   enum InstWidening {
1381     CM_Unknown,
1382     CM_Widen,         // For consecutive accesses with stride +1.
1383     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1384     CM_Interleave,
1385     CM_GatherScatter,
1386     CM_Scalarize
1387   };
1388 
1389   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1390   /// instruction \p I and vector width \p VF.
1391   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1392                            InstructionCost Cost) {
1393     assert(VF.isVector() && "Expected VF >=2");
1394     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1395   }
1396 
1397   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1398   /// interleaving group \p Grp and vector width \p VF.
1399   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1400                            ElementCount VF, InstWidening W,
1401                            InstructionCost Cost) {
1402     assert(VF.isVector() && "Expected VF >=2");
1403     // Broadcast this decision to all instructions inside the group.
1404     // But the cost will be assigned to one instruction only.
1405     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1406       if (auto *I = Grp->getMember(i)) {
1407         if (Grp->getInsertPos() == I)
1408           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1409         else
1410           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1411       }
1412     }
1413   }
1414 
1415   /// Return the cost model decision for the given instruction \p I and vector
1416   /// width \p VF. Return CM_Unknown if this instruction did not pass
1417   /// through the cost modeling.
1418   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1419     assert(VF.isVector() && "Expected VF to be a vector VF");
1420     // Cost model is not run in the VPlan-native path - return conservative
1421     // result until this changes.
1422     if (EnableVPlanNativePath)
1423       return CM_GatherScatter;
1424 
1425     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1426     auto Itr = WideningDecisions.find(InstOnVF);
1427     if (Itr == WideningDecisions.end())
1428       return CM_Unknown;
1429     return Itr->second.first;
1430   }
1431 
1432   /// Return the vectorization cost for the given instruction \p I and vector
1433   /// width \p VF.
1434   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1435     assert(VF.isVector() && "Expected VF >=2");
1436     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1437     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1438            "The cost is not calculated");
1439     return WideningDecisions[InstOnVF].second;
1440   }
1441 
1442   /// Return True if instruction \p I is an optimizable truncate whose operand
1443   /// is an induction variable. Such a truncate will be removed by adding a new
1444   /// induction variable with the destination type.
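       /// As an illustrative example, in a loop such as
       ///   for (int64_t i = 0; i < n; ++i)
       ///     A[(int32_t)i] = 0;
       /// the truncate of the induction to i32 can be removed by introducing a
       /// second, i32-typed induction variable instead of truncating the widened
       /// i64 induction on every iteration.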
1445   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1446     // If the instruction is not a truncate, return false.
1447     auto *Trunc = dyn_cast<TruncInst>(I);
1448     if (!Trunc)
1449       return false;
1450 
1451     // Get the source and destination types of the truncate.
1452     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1453     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1454 
1455     // If the truncate is free for the given types, return false. Replacing a
1456     // free truncate with an induction variable would add an induction variable
1457     // update instruction to each iteration of the loop. We exclude from this
1458     // check the primary induction variable since it will need an update
1459     // instruction regardless.
1460     Value *Op = Trunc->getOperand(0);
1461     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1462       return false;
1463 
1464     // If the truncated value is not an induction variable, return false.
1465     return Legal->isInductionPhi(Op);
1466   }
1467 
1468   /// Collects the instructions to scalarize for each predicated instruction in
1469   /// the loop.
1470   void collectInstsToScalarize(ElementCount VF);
1471 
1472   /// Collect Uniform and Scalar values for the given \p VF.
1473   /// The sets depend on CM decision for Load/Store instructions
1474   /// that may be vectorized as interleave, gather-scatter or scalarized.
1475   void collectUniformsAndScalars(ElementCount VF) {
1476     // Do the analysis once.
1477     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1478       return;
1479     setCostBasedWideningDecision(VF);
1480     collectLoopUniforms(VF);
1481     collectLoopScalars(VF);
1482   }
1483 
1484   /// Returns true if the target machine supports masked store operation
1485   /// for the given \p DataType and kind of access to \p Ptr.
1486   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1487     return Legal->isConsecutivePtr(Ptr) &&
1488            TTI.isLegalMaskedStore(DataType, Alignment);
1489   }
1490 
1491   /// Returns true if the target machine supports masked load operation
1492   /// for the given \p DataType and kind of access to \p Ptr.
1493   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1494     return Legal->isConsecutivePtr(Ptr) &&
1495            TTI.isLegalMaskedLoad(DataType, Alignment);
1496   }
1497 
1498   /// Returns true if the target machine supports masked scatter operation
1499   /// for the given \p DataType.
1500   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1501     return TTI.isLegalMaskedScatter(DataType, Alignment);
1502   }
1503 
1504   /// Returns true if the target machine supports masked gather operation
1505   /// for the given \p DataType.
1506   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1507     return TTI.isLegalMaskedGather(DataType, Alignment);
1508   }
1509 
1510   /// Returns true if the target machine can represent \p V as a masked gather
1511   /// or scatter operation.
1512   bool isLegalGatherOrScatter(Value *V) {
1513     bool LI = isa<LoadInst>(V);
1514     bool SI = isa<StoreInst>(V);
1515     if (!LI && !SI)
1516       return false;
1517     auto *Ty = getMemInstValueType(V);
1518     Align Align = getLoadStoreAlignment(V);
1519     return (LI && isLegalMaskedGather(Ty, Align)) ||
1520            (SI && isLegalMaskedScatter(Ty, Align));
1521   }
1522 
1523   /// Returns true if \p I is an instruction that will be scalarized with
1524   /// predication. Such instructions include conditional stores and
1525   /// instructions that may divide by zero.
1526   /// If a non-zero VF has been calculated, we check if I will be scalarized
1527   /// with predication for that VF.
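       /// As an illustrative example, in
       ///   if (C[i]) A[i] = X / B[i];
       /// both the conditional store to A[i] and the division (which may trap
       /// for masked-off lanes) may have to be scalarized and executed only
       /// under their block's predicate.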
1528   bool isScalarWithPredication(Instruction *I,
1529                                ElementCount VF = ElementCount::getFixed(1));
1530 
1531   // Returns true if \p I is an instruction that will be predicated either
1532   // through scalar predication or masked load/store or masked gather/scatter.
1533   // Superset of instructions that return true for isScalarWithPredication.
1534   bool isPredicatedInst(Instruction *I) {
1535     if (!blockNeedsPredication(I->getParent()))
1536       return false;
1537     // Loads and stores that need some form of masked operation are predicated
1538     // instructions.
1539     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1540       return Legal->isMaskRequired(I);
1541     return isScalarWithPredication(I);
1542   }
1543 
1544   /// Returns true if \p I is a memory instruction with consecutive memory
1545   /// access that can be widened.
1546   bool
1547   memoryInstructionCanBeWidened(Instruction *I,
1548                                 ElementCount VF = ElementCount::getFixed(1));
1549 
1550   /// Returns true if \p I is a memory instruction in an interleaved-group
1551   /// of memory accesses that can be vectorized with wide vector loads/stores
1552   /// and shuffles.
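       /// As an illustrative example, the two loads in
       ///   for (i = 0; i < N; ++i) { Sum += A[2 * i] + A[2 * i + 1]; }
       /// form an interleave group with factor 2 that can be lowered as one
       /// wide load followed by shuffles that de-interleave the even and odd
       /// elements.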
1553   bool
1554   interleavedAccessCanBeWidened(Instruction *I,
1555                                 ElementCount VF = ElementCount::getFixed(1));
1556 
1557   /// Check if \p Instr belongs to any interleaved access group.
1558   bool isAccessInterleaved(Instruction *Instr) {
1559     return InterleaveInfo.isInterleaved(Instr);
1560   }
1561 
1562   /// Get the interleaved access group that \p Instr belongs to.
1563   const InterleaveGroup<Instruction> *
1564   getInterleavedAccessGroup(Instruction *Instr) {
1565     return InterleaveInfo.getInterleaveGroup(Instr);
1566   }
1567 
1568   /// Returns true if we're required to use a scalar epilogue for at least
1569   /// the final iteration of the original loop.
1570   bool requiresScalarEpilogue() const {
1571     if (!isScalarEpilogueAllowed())
1572       return false;
1573     // If we might exit from anywhere but the latch, we must run the exiting
1574     // iteration in scalar form.
1575     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1576       return true;
1577     return InterleaveInfo.requiresScalarEpilogue();
1578   }
1579 
1580   /// Returns true if a scalar epilogue is allowed, i.e. it has not been
1581   /// disabled by optsize or a loop hint annotation.
1582   bool isScalarEpilogueAllowed() const {
1583     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1584   }
1585 
1586   /// Returns true if all loop blocks should be masked to fold the loop tail.
1587   bool foldTailByMasking() const { return FoldTailByMasking; }
1588 
1589   bool blockNeedsPredication(BasicBlock *BB) {
1590     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1591   }
1592 
1593   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1594   /// nodes to the chain of instructions representing the reductions. Uses a
1595   /// MapVector to ensure deterministic iteration order.
1596   using ReductionChainMap =
1597       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1598 
1599   /// Return the chain of instructions representing an inloop reduction.
1600   const ReductionChainMap &getInLoopReductionChains() const {
1601     return InLoopReductionChains;
1602   }
1603 
1604   /// Returns true if the Phi is part of an inloop reduction.
1605   bool isInLoopReduction(PHINode *Phi) const {
1606     return InLoopReductionChains.count(Phi);
1607   }
1608 
1609   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1610   /// with factor VF.  Return the cost of the instruction, including
1611   /// scalarization overhead if it's needed.
1612   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1613 
1614   /// Estimate cost of a call instruction CI if it were vectorized with factor
1615   /// VF. Return the cost of the instruction, including scalarization overhead
1616   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1617   /// scalarized - i.e. either a vector version isn't available, or it is too
1618   /// expensive.
1619   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1620                                     bool &NeedToScalarize);
1621 
1622   /// Invalidates decisions already taken by the cost model.
1623   void invalidateCostModelingDecisions() {
1624     WideningDecisions.clear();
1625     Uniforms.clear();
1626     Scalars.clear();
1627   }
1628 
1629 private:
1630   unsigned NumPredStores = 0;
1631 
1632   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1633   /// than zero. One is returned if vectorization should best be avoided due
1634   /// to cost.
1635   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1636                                     ElementCount UserVF);
1637 
1638   /// The vectorization cost is a combination of the cost itself and a boolean
1639   /// indicating whether any of the contributing operations will actually
1640   /// operate on vector values after type legalization in the backend. If this
1641   /// latter value is false, then all operations will be scalarized (i.e. no
1642   /// vectorization has actually taken place).
1645   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1646 
1647   /// Returns the expected execution cost. The unit of the cost does
1648   /// not matter because we use the 'cost' units to compare different
1649   /// vector widths. The cost that is returned is *not* normalized by
1650   /// the factor width.
1651   VectorizationCostTy expectedCost(ElementCount VF);
1652 
1653   /// Returns the execution time cost of an instruction for a given vector
1654   /// width. Vector width of one means scalar.
1655   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1656 
1657   /// The cost-computation logic from getInstructionCost which provides
1658   /// the vector type as an output parameter.
1659   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1660                                      Type *&VectorTy);
1661 
1662   /// Return the cost of instructions in an inloop reduction pattern, if I is
1663   /// part of that pattern.
1664   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1665                                           Type *VectorTy,
1666                                           TTI::TargetCostKind CostKind);
1667 
1668   /// Calculate vectorization cost of memory instruction \p I.
1669   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1670 
1671   /// The cost computation for scalarized memory instruction.
1672   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1673 
1674   /// The cost computation for interleaving group of memory instructions.
1675   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1676 
1677   /// The cost computation for Gather/Scatter instruction.
1678   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1679 
1680   /// The cost computation for widening instruction \p I with consecutive
1681   /// memory access.
1682   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1683 
1684   /// The cost calculation for Load/Store instruction \p I with a uniform
1685   /// pointer - Load: scalar load + broadcast.
1686   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1687   /// element)
1688   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1689 
1690   /// Estimate the overhead of scalarizing an instruction. This is a
1691   /// convenience wrapper for the type-based getScalarizationOverhead API.
1692   InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);
1693 
1694   /// Returns whether the instruction is a load or store and will be emitted
1695   /// as a vector operation.
1696   bool isConsecutiveLoadOrStore(Instruction *I);
1697 
1698   /// Returns true if an artificially high cost for emulated masked memrefs
1699   /// should be used.
1700   bool useEmulatedMaskMemRefHack(Instruction *I);
1701 
1702   /// Map of scalar integer values to the smallest bitwidth they can be legally
1703   /// represented as. The vector equivalents of these values should be truncated
1704   /// to this type.
1705   MapVector<Instruction *, uint64_t> MinBWs;
1706 
1707   /// A type representing the costs for instructions if they were to be
1708   /// scalarized rather than vectorized. The entries are Instruction-Cost
1709   /// pairs.
1710   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1711 
1712   /// A set containing all BasicBlocks that are known to be present after
1713   /// vectorization as predicated blocks.
1714   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1715 
1716   /// Records whether it is allowed to have the original scalar loop execute at
1717   /// least once. This may be needed as a fallback loop in case runtime
1718   /// aliasing/dependence checks fail, or to handle the tail/remainder
1719   /// iterations when the trip count is unknown or doesn't divide by the VF,
1720   /// or as a peel-loop to handle gaps in interleave-groups.
1721   /// Under optsize and when the trip count is very small we don't allow any
1722   /// iterations to execute in the scalar loop.
1723   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1724 
1725   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1726   bool FoldTailByMasking = false;
1727 
1728   /// A map holding scalar costs for different vectorization factors. The
1729   /// presence of a cost for an instruction in the mapping indicates that the
1730   /// instruction will be scalarized when vectorizing with the associated
1731   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1732   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1733 
1734   /// Holds the instructions known to be uniform after vectorization.
1735   /// The data is collected per VF.
1736   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1737 
1738   /// Holds the instructions known to be scalar after vectorization.
1739   /// The data is collected per VF.
1740   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1741 
1742   /// Holds the instructions (address computations) that are forced to be
1743   /// scalarized.
1744   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1745 
1746   /// PHINodes of the reductions that should be expanded in-loop along with
1747   /// their associated chains of reduction operations, in program order from top
1748   /// (PHI) to bottom.
1749   ReductionChainMap InLoopReductionChains;
1750 
1751   /// A Map of inloop reduction operations and their immediate chain operand.
1752   /// FIXME: This can be removed once reductions can be costed correctly in
1753   /// vplan. This was added to allow quick lookup to the inloop operations,
1754   /// without having to loop through InLoopReductionChains.
1755   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1756 
1757   /// Returns the expected difference in cost from scalarizing the expression
1758   /// feeding a predicated instruction \p PredInst. The instructions to
1759   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1760   /// non-negative return value implies the expression will be scalarized.
1761   /// Currently, only single-use chains are considered for scalarization.
1762   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1763                               ElementCount VF);
1764 
1765   /// Collect the instructions that are uniform after vectorization. An
1766   /// instruction is uniform if we represent it with a single scalar value in
1767   /// the vectorized loop corresponding to each vector iteration. Examples of
1768   /// uniform instructions include pointer operands of consecutive or
1769   /// interleaved memory accesses. Note that although uniformity implies an
1770   /// instruction will be scalar, the reverse is not true. In general, a
1771   /// scalarized instruction will be represented by VF scalar values in the
1772   /// vectorized loop, each corresponding to an iteration of the original
1773   /// scalar loop.
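       /// As an illustrative example, the address computation feeding a
       /// consecutive load of A[i] is uniform: only the lane-zero address is
       /// needed per vector iteration, whereas a scalarized gather-like access
       /// A[B[i]] needs a distinct address for each of the VF lanes.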
1774   void collectLoopUniforms(ElementCount VF);
1775 
1776   /// Collect the instructions that are scalar after vectorization. An
1777   /// instruction is scalar if it is known to be uniform or will be scalarized
1778   /// during vectorization. Non-uniform scalarized instructions will be
1779   /// represented by VF values in the vectorized loop, each corresponding to an
1780   /// iteration of the original scalar loop.
1781   void collectLoopScalars(ElementCount VF);
1782 
1783   /// Keeps cost model vectorization decisions and costs for instructions.
1784   /// Right now it is used for memory instructions only.
1785   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1786                                 std::pair<InstWidening, InstructionCost>>;
1787 
1788   DecisionList WideningDecisions;
1789 
1790   /// Returns true if \p V is expected to be vectorized and it needs to be
1791   /// extracted.
1792   bool needsExtract(Value *V, ElementCount VF) const {
1793     Instruction *I = dyn_cast<Instruction>(V);
1794     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1795         TheLoop->isLoopInvariant(I))
1796       return false;
1797 
1798     // Assume we can vectorize V (and hence we need extraction) if the
1799     // scalars are not computed yet. This can happen, because it is called
1800     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1801     // the scalars are collected. That should be a safe assumption in most
1802     // cases, because we check if the operands have vectorizable types
1803     // beforehand in LoopVectorizationLegality.
1804     return Scalars.find(VF) == Scalars.end() ||
1805            !isScalarAfterVectorization(I, VF);
1806   }
1807 
1808   /// Returns a range containing only operands needing to be extracted.
1809   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1810                                                    ElementCount VF) {
1811     return SmallVector<Value *, 4>(make_filter_range(
1812         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1813   }
1814 
1815   /// Determines if we have the infrastructure to vectorize loop \p L and its
1816   /// epilogue, assuming the main loop is vectorized by \p VF.
1817   bool isCandidateForEpilogueVectorization(const Loop &L,
1818                                            const ElementCount VF) const;
1819 
1820   /// Returns true if epilogue vectorization is considered profitable, and
1821   /// false otherwise.
1822   /// \p VF is the vectorization factor chosen for the original loop.
1823   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1824 
1825 public:
1826   /// The loop that we evaluate.
1827   Loop *TheLoop;
1828 
1829   /// Predicated scalar evolution analysis.
1830   PredicatedScalarEvolution &PSE;
1831 
1832   /// Loop Info analysis.
1833   LoopInfo *LI;
1834 
1835   /// Vectorization legality.
1836   LoopVectorizationLegality *Legal;
1837 
1838   /// Vector target information.
1839   const TargetTransformInfo &TTI;
1840 
1841   /// Target Library Info.
1842   const TargetLibraryInfo *TLI;
1843 
1844   /// Demanded bits analysis.
1845   DemandedBits *DB;
1846 
1847   /// Assumption cache.
1848   AssumptionCache *AC;
1849 
1850   /// Interface to emit optimization remarks.
1851   OptimizationRemarkEmitter *ORE;
1852 
1853   const Function *TheFunction;
1854 
1855   /// Loop Vectorize Hint.
1856   const LoopVectorizeHints *Hints;
1857 
1858   /// The interleaved access information contains groups of interleaved
1859   /// accesses that have the same stride and are close to each other.
1860   InterleavedAccessInfo &InterleaveInfo;
1861 
1862   /// Values to ignore in the cost model.
1863   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1864 
1865   /// Values to ignore in the cost model when VF > 1.
1866   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1867 
1868   /// Profitable vector factors.
1869   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1870 };
1871 
1872 } // end namespace llvm
1873 
1874 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1875 // vectorization. The loop needs to be annotated with #pragma omp simd
1876 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1877 // vector length information is not provided, vectorization is not considered
1878 // explicit. Interleave hints are not allowed either. These limitations will be
1879 // relaxed in the future.
1880 // Please note that we are currently forced to abuse the pragma 'clang
1881 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1882 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1883 // provides *explicit vectorization hints* (LV can bypass legal checks and
1884 // assume that vectorization is legal). However, both hints are implemented
1885 // using the same metadata (llvm.loop.vectorize, processed by
1886 // LoopVectorizeHints). This will be fixed in the future when the native IR
1887 // representation for pragma 'omp simd' is introduced.
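     // As an illustrative example, an outer loop annotated with
     //   #pragma omp simd simdlen(4)
     // or
     //   #pragma clang loop vectorize(enable) vectorize_width(4)
     // is treated as explicitly vectorized here, whereas the same pragmas
     // without an explicit width are not.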
1888 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1889                                    OptimizationRemarkEmitter *ORE) {
1890   assert(!OuterLp->isInnermost() && "This is not an outer loop");
1891   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1892 
1893   // Only outer loops with an explicit vectorization hint are supported.
1894   // Unannotated outer loops are ignored.
1895   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1896     return false;
1897 
1898   Function *Fn = OuterLp->getHeader()->getParent();
1899   if (!Hints.allowVectorization(Fn, OuterLp,
1900                                 true /*VectorizeOnlyWhenForced*/)) {
1901     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1902     return false;
1903   }
1904 
1905   if (Hints.getInterleave() > 1) {
1906     // TODO: Interleave support is future work.
1907     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1908                          "outer loops.\n");
1909     Hints.emitRemarkWithHints();
1910     return false;
1911   }
1912 
1913   return true;
1914 }
1915 
1916 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1917                                   OptimizationRemarkEmitter *ORE,
1918                                   SmallVectorImpl<Loop *> &V) {
1919   // Collect inner loops and outer loops without irreducible control flow. For
1920   // now, only collect outer loops that have explicit vectorization hints. If we
1921   // are stress testing the VPlan H-CFG construction, we collect the outermost
1922   // loop of every loop nest.
1923   if (L.isInnermost() || VPlanBuildStressTest ||
1924       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1925     LoopBlocksRPO RPOT(&L);
1926     RPOT.perform(LI);
1927     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1928       V.push_back(&L);
1929       // TODO: Collect inner loops inside marked outer loops in case
1930       // vectorization fails for the outer loop. Do not invoke
1931       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1932       // already known to be reducible. We can use an inherited attribute for
1933       // that.
1934       return;
1935     }
1936   }
1937   for (Loop *InnerL : L)
1938     collectSupportedLoops(*InnerL, LI, ORE, V);
1939 }
1940 
1941 namespace {
1942 
1943 /// The LoopVectorize Pass.
1944 struct LoopVectorize : public FunctionPass {
1945   /// Pass identification, replacement for typeid
1946   static char ID;
1947 
1948   LoopVectorizePass Impl;
1949 
1950   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1951                          bool VectorizeOnlyWhenForced = false)
1952       : FunctionPass(ID),
1953         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1954     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1955   }
1956 
1957   bool runOnFunction(Function &F) override {
1958     if (skipFunction(F))
1959       return false;
1960 
1961     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1962     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1963     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1964     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1965     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1966     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1967     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1968     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1969     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1970     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1971     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1972     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1973     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1974 
1975     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1976         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1977 
1978     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1979                         GetLAA, *ORE, PSI).MadeAnyChange;
1980   }
1981 
1982   void getAnalysisUsage(AnalysisUsage &AU) const override {
1983     AU.addRequired<AssumptionCacheTracker>();
1984     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1985     AU.addRequired<DominatorTreeWrapperPass>();
1986     AU.addRequired<LoopInfoWrapperPass>();
1987     AU.addRequired<ScalarEvolutionWrapperPass>();
1988     AU.addRequired<TargetTransformInfoWrapperPass>();
1989     AU.addRequired<AAResultsWrapperPass>();
1990     AU.addRequired<LoopAccessLegacyAnalysis>();
1991     AU.addRequired<DemandedBitsWrapperPass>();
1992     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1993     AU.addRequired<InjectTLIMappingsLegacy>();
1994 
1995     // We currently do not preserve loopinfo/dominator analyses with outer loop
1996     // vectorization. Until this is addressed, mark these analyses as preserved
1997     // only for non-VPlan-native path.
1998     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1999     if (!EnableVPlanNativePath) {
2000       AU.addPreserved<LoopInfoWrapperPass>();
2001       AU.addPreserved<DominatorTreeWrapperPass>();
2002     }
2003 
2004     AU.addPreserved<BasicAAWrapperPass>();
2005     AU.addPreserved<GlobalsAAWrapperPass>();
2006     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2007   }
2008 };
2009 
2010 } // end anonymous namespace
2011 
2012 //===----------------------------------------------------------------------===//
2013 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2014 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2015 //===----------------------------------------------------------------------===//
2016 
2017 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2018   // We need to place the broadcast of invariant variables outside the loop,
2019   // but only if it's proven safe to do so. Otherwise, the broadcast will be
2020   // placed inside the vector loop body.
2021   Instruction *Instr = dyn_cast<Instruction>(V);
2022   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2023                      (!Instr ||
2024                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2025   // Place the code for broadcasting invariant variables in the new preheader.
2026   IRBuilder<>::InsertPointGuard Guard(Builder);
2027   if (SafeToHoist)
2028     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2029 
2030   // Broadcast the scalar into all locations in the vector.
2031   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2032 
2033   return Shuf;
2034 }
2035 
2036 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2037     const InductionDescriptor &II, Value *Step, Value *Start,
2038     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2039     VPTransformState &State) {
2040   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2041          "Expected either an induction phi-node or a truncate of it!");
2042 
2043   // Construct the initial value of the vector IV in the vector loop preheader
2044   auto CurrIP = Builder.saveIP();
2045   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2046   if (isa<TruncInst>(EntryVal)) {
2047     assert(Start->getType()->isIntegerTy() &&
2048            "Truncation requires an integer type");
2049     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2050     Step = Builder.CreateTrunc(Step, TruncType);
2051     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2052   }
2053   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2054   Value *SteppedStart =
2055       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2056 
2057   // We create vector phi nodes for both integer and floating-point induction
2058   // variables. Here, we determine the kind of arithmetic we will perform.
2059   Instruction::BinaryOps AddOp;
2060   Instruction::BinaryOps MulOp;
2061   if (Step->getType()->isIntegerTy()) {
2062     AddOp = Instruction::Add;
2063     MulOp = Instruction::Mul;
2064   } else {
2065     AddOp = II.getInductionOpcode();
2066     MulOp = Instruction::FMul;
2067   }
2068 
2069   // Multiply the vectorization factor by the step using integer or
2070   // floating-point arithmetic as appropriate.
2071   Value *ConstVF =
2072       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
2073   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2074 
2075   // Create a vector splat to use in the induction update.
2076   //
2077   // FIXME: If the step is non-constant, we create the vector splat with
2078   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2079   //        handle a constant vector splat.
2080   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2081   Value *SplatVF = isa<Constant>(Mul)
2082                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2083                        : Builder.CreateVectorSplat(VF, Mul);
2084   Builder.restoreIP(CurrIP);
2085 
2086   // We may need to add the step a number of times, depending on the unroll
2087   // factor. The last of those goes into the PHI.
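       // As an illustrative example, with an integer step S, VF = 4 and UF = 2,
       // the parts are <i, i+S, i+2*S, i+3*S> and <i+4*S, ..., i+7*S>, and the
       // value fed back into the PHI is the second part plus a splat of 4*S.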
2088   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2089                                     &*LoopVectorBody->getFirstInsertionPt());
2090   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2091   Instruction *LastInduction = VecInd;
2092   for (unsigned Part = 0; Part < UF; ++Part) {
2093     State.set(Def, EntryVal, LastInduction, Part);
2094 
2095     if (isa<TruncInst>(EntryVal))
2096       addMetadata(LastInduction, EntryVal);
2097     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2098                                           State, Part);
2099 
2100     LastInduction = cast<Instruction>(addFastMathFlag(
2101         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2102     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2103   }
2104 
2105   // Move the last step to the end of the latch block. This ensures consistent
2106   // placement of all induction updates.
2107   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2108   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2109   auto *ICmp = cast<Instruction>(Br->getCondition());
2110   LastInduction->moveBefore(ICmp);
2111   LastInduction->setName("vec.ind.next");
2112 
2113   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2114   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2115 }
2116 
2117 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2118   return Cost->isScalarAfterVectorization(I, VF) ||
2119          Cost->isProfitableToScalarize(I, VF);
2120 }
2121 
2122 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2123   if (shouldScalarizeInstruction(IV))
2124     return true;
2125   auto isScalarInst = [&](User *U) -> bool {
2126     auto *I = cast<Instruction>(U);
2127     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2128   };
2129   return llvm::any_of(IV->users(), isScalarInst);
2130 }
2131 
2132 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2133     const InductionDescriptor &ID, const Instruction *EntryVal,
2134     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2135     unsigned Part, unsigned Lane) {
2136   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2137          "Expected either an induction phi-node or a truncate of it!");
2138 
2139   // This induction variable is not the phi from the original loop but the
2140   // newly-created IV based on the proof that the casted Phi is equal to the
2141   // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
2142   // re-uses the same InductionDescriptor as the original IV, but we don't
2143   // have to do any recording in this case - that is done when the original
2144   // IV is processed.
2145   if (isa<TruncInst>(EntryVal))
2146     return;
2147 
2148   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2149   if (Casts.empty())
2150     return;
2151   // Only the first Cast instruction in the Casts vector is of interest.
2152   // The rest of the Casts (if any) have no uses outside the
2153   // induction update chain itself.
2154   if (Lane < UINT_MAX)
2155     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2156   else
2157     State.set(CastDef, VectorLoopVal, Part);
2158 }
2159 
2160 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2161                                                 TruncInst *Trunc, VPValue *Def,
2162                                                 VPValue *CastDef,
2163                                                 VPTransformState &State) {
2164   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2165          "Primary induction variable must have an integer type");
2166 
2167   auto II = Legal->getInductionVars().find(IV);
2168   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2169 
2170   auto ID = II->second;
2171   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2172 
2173   // The value from the original loop to which we are mapping the new induction
2174   // variable.
2175   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2176 
2177   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2178 
2179   // Generate code for the induction step. Note that induction steps are
2180   // required to be loop-invariant.
2181   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2182     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2183            "Induction step should be loop invariant");
2184     if (PSE.getSE()->isSCEVable(IV->getType())) {
2185       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2186       return Exp.expandCodeFor(Step, Step->getType(),
2187                                LoopVectorPreHeader->getTerminator());
2188     }
2189     return cast<SCEVUnknown>(Step)->getValue();
2190   };
2191 
2192   // The scalar value to broadcast. This is derived from the canonical
2193   // induction variable. If a truncation type is given, truncate the canonical
2194   // induction variable and step. Otherwise, derive these values from the
2195   // induction descriptor.
2196   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2197     Value *ScalarIV = Induction;
2198     if (IV != OldInduction) {
2199       ScalarIV = IV->getType()->isIntegerTy()
2200                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2201                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2202                                           IV->getType());
2203       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2204       ScalarIV->setName("offset.idx");
2205     }
2206     if (Trunc) {
2207       auto *TruncType = cast<IntegerType>(Trunc->getType());
2208       assert(Step->getType()->isIntegerTy() &&
2209              "Truncation requires an integer step");
2210       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2211       Step = Builder.CreateTrunc(Step, TruncType);
2212     }
2213     return ScalarIV;
2214   };
2215 
2216   // Create the vector values from the scalar IV, in the absence of creating a
2217   // vector IV.
2218   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2219     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2220     for (unsigned Part = 0; Part < UF; ++Part) {
2221       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2222       Value *EntryPart =
2223           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2224                         ID.getInductionOpcode());
2225       State.set(Def, EntryVal, EntryPart, Part);
2226       if (Trunc)
2227         addMetadata(EntryPart, Trunc);
2228       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2229                                             State, Part);
2230     }
2231   };
2232 
2233   // Now do the actual transformations, and start with creating the step value.
2234   Value *Step = CreateStepValue(ID.getStep());
2235   if (VF.isZero() || VF.isScalar()) {
2236     Value *ScalarIV = CreateScalarIV(Step);
2237     CreateSplatIV(ScalarIV, Step);
2238     return;
2239   }
2240 
2241   // Determine if we want a scalar version of the induction variable. This is
2242   // true if the induction variable itself is not widened, or if it has at
2243   // least one user in the loop that is not widened.
2244   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2245   if (!NeedsScalarIV) {
2246     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2247                                     State);
2248     return;
2249   }
2250 
2251   // Try to create a new independent vector induction variable. If we can't
2252   // create the phi node, we will splat the scalar induction variable in each
2253   // loop iteration.
2254   if (!shouldScalarizeInstruction(EntryVal)) {
2255     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2256                                     State);
2257     Value *ScalarIV = CreateScalarIV(Step);
2258     // Create scalar steps that can be used by instructions we will later
2259     // scalarize. Note that the addition of the scalar steps will not increase
2260     // the number of instructions in the loop in the common case prior to
2261     // InstCombine. We will be trading one vector extract for each scalar step.
2262     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2263     return;
2264   }
2265 
2266   // All IV users are scalar instructions, so only emit a scalar IV, not a
2267   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
2268   // predicate used by the masked loads/stores.
2269   Value *ScalarIV = CreateScalarIV(Step);
2270   if (!Cost->isScalarEpilogueAllowed())
2271     CreateSplatIV(ScalarIV, Step);
2272   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2273 }
2274 
2275 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2276                                           Instruction::BinaryOps BinOp) {
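       // As an illustrative example: for Val = <%x, %x, %x, %x>, StartIdx = 4
       // and Step = %s, the result is %x + <4, 5, 6, 7> * %s (using BinOp to
       // combine them in the floating-point case).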
2277   // Create and check the types.
2278   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2279   int VLen = ValVTy->getNumElements();
2280 
2281   Type *STy = Val->getType()->getScalarType();
2282   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2283          "Induction Step must be an integer or FP");
2284   assert(Step->getType() == STy && "Step has wrong type");
2285 
2286   SmallVector<Constant *, 8> Indices;
2287 
2288   if (STy->isIntegerTy()) {
2289     // Create a vector of consecutive numbers starting at StartIdx.
2290     for (int i = 0; i < VLen; ++i)
2291       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2292 
2293     // Add the consecutive indices to the vector value.
2294     Constant *Cv = ConstantVector::get(Indices);
2295     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2296     Step = Builder.CreateVectorSplat(VLen, Step);
2297     assert(Step->getType() == Val->getType() && "Invalid step vec");
2298     // FIXME: The newly created binary instructions should contain nsw/nuw
2299     // flags, which can be found from the original scalar operations.
2300     Step = Builder.CreateMul(Cv, Step);
2301     return Builder.CreateAdd(Val, Step, "induction");
2302   }
2303 
2304   // Floating point induction.
2305   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2306          "Binary Opcode should be specified for FP induction");
2307   // Create a vector of consecutive numbers starting at StartIdx.
2308   for (int i = 0; i < VLen; ++i)
2309     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2310 
2311   // Add the consecutive indices to the vector value.
2312   Constant *Cv = ConstantVector::get(Indices);
2313 
2314   Step = Builder.CreateVectorSplat(VLen, Step);
2315 
2316   // Floating point operations had to be 'fast' to enable the induction.
2317   FastMathFlags Flags;
2318   Flags.setFast();
2319 
2320   Value *MulOp = Builder.CreateFMul(Cv, Step);
2321   if (isa<Instruction>(MulOp))
2322     // Have to check, MulOp may be a constant
2323     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2324 
2325   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2326   if (isa<Instruction>(BOp))
2327     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2328   return BOp;
2329 }
2330 
2331 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2332                                            Instruction *EntryVal,
2333                                            const InductionDescriptor &ID,
2334                                            VPValue *Def, VPValue *CastDef,
2335                                            VPTransformState &State) {
2336   // We shouldn't have to build scalar steps if we aren't vectorizing.
2337   assert(VF.isVector() && "VF should be greater than one");
2338   // Get the value type and ensure it and the step have the same integer type.
2339   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2340   assert(ScalarIVTy == Step->getType() &&
2341          "Val and Step should have the same type");
2342 
2343   // We build scalar steps for both integer and floating-point induction
2344   // variables. Here, we determine the kind of arithmetic we will perform.
2345   Instruction::BinaryOps AddOp;
2346   Instruction::BinaryOps MulOp;
2347   if (ScalarIVTy->isIntegerTy()) {
2348     AddOp = Instruction::Add;
2349     MulOp = Instruction::Mul;
2350   } else {
2351     AddOp = ID.getInductionOpcode();
2352     MulOp = Instruction::FMul;
2353   }
2354 
2355   // Determine the number of scalars we need to generate for each unroll
2356   // iteration. If EntryVal is uniform, we only need to generate the first
2357   // lane. Otherwise, we generate all VF values.
2358   unsigned Lanes =
2359       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2360           ? 1
2361           : VF.getKnownMinValue();
2362   assert((!VF.isScalable() || Lanes == 1) &&
2363          "Should never scalarize a scalable vector");
  // Compute the scalar steps and save the results in State.
2365   for (unsigned Part = 0; Part < UF; ++Part) {
2366     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2367       auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2368                                          ScalarIVTy->getScalarSizeInBits());
2369       Value *StartIdx =
2370           createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2371       if (ScalarIVTy->isFloatingPointTy())
2372         StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
2373       StartIdx = addFastMathFlag(Builder.CreateBinOp(
2374           AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
2375       // The step returned by `createStepForVF` is a runtime-evaluated value
2376       // when VF is scalable. Otherwise, it should be folded into a Constant.
2377       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2378              "Expected StartIdx to be folded to a constant when VF is not "
2379              "scalable");
2380       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2381       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2382       State.set(Def, Add, VPIteration(Part, Lane));
2383       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2384                                             Part, Lane);
2385     }
2386   }
2387 }
2388 
2389 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2390   assert(V != Induction && "The new induction variable should not be used.");
2391   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2392   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2393 
2394   // If we have a stride that is replaced by one, do it here. Defer this for
2395   // the VPlan-native path until we start running Legal checks in that path.
2396   if (!EnableVPlanNativePath && Legal->hasStride(V))
2397     V = ConstantInt::get(V->getType(), 1);
2398 
2399   // If we have a vector mapped to this value, return it.
2400   if (VectorLoopValueMap.hasVectorValue(V, Part))
2401     return VectorLoopValueMap.getVectorValue(V, Part);
2402 
2403   // If the value has not been vectorized, check if it has been scalarized
2404   // instead. If it has been scalarized, and we actually need the value in
2405   // vector form, we will construct the vector values on demand.
2406   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2407     Value *ScalarValue =
2408         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, 0));
2409 
2410     // If we've scalarized a value, that value should be an instruction.
2411     auto *I = cast<Instruction>(V);
2412 
2413     // If we aren't vectorizing, we can just copy the scalar map values over to
2414     // the vector map.
2415     if (VF.isScalar()) {
2416       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2417       return ScalarValue;
2418     }
2419 
2420     // Get the last scalar instruction we generated for V and Part. If the value
2421     // is known to be uniform after vectorization, this corresponds to lane zero
2422     // of the Part unroll iteration. Otherwise, the last instruction is the one
2423     // we created for the last vector lane of the Part unroll iteration.
2424     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF)
2425                             ? 0
2426                             : VF.getKnownMinValue() - 1;
2427     assert((!VF.isScalable() || LastLane == 0) &&
2428            "Scalable vectorization can't lead to any scalarized values.");
2429     auto *LastInst = cast<Instruction>(
2430         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, LastLane)));
2431 
2432     // Set the insert point after the last scalarized instruction. This ensures
2433     // the insertelement sequence will directly follow the scalar definitions.
2434     auto OldIP = Builder.saveIP();
2435     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2436     Builder.SetInsertPoint(&*NewIP);
2437 
2438     // However, if we are vectorizing, we need to construct the vector values.
2439     // If the value is known to be uniform after vectorization, we can just
2440     // broadcast the scalar value corresponding to lane zero for each unroll
2441     // iteration. Otherwise, we construct the vector values using insertelement
2442     // instructions. Since the resulting vectors are stored in
2443     // VectorLoopValueMap, we will only generate the insertelements once.
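    // As an illustrative sketch, packing a scalarized i32 value with VF = 4
    // produces a chain of the form (names are hypothetical):
    //   %v0 = insertelement <4 x i32> poison, i32 %s0, i32 0
    //   %v1 = insertelement <4 x i32> %v0,    i32 %s1, i32 1
    //   ...
    //   %v3 = insertelement <4 x i32> %v2,    i32 %s3, i32 3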
2444     Value *VectorValue = nullptr;
2445     if (Cost->isUniformAfterVectorization(I, VF)) {
2446       VectorValue = getBroadcastInstrs(ScalarValue);
2447       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2448     } else {
2449       // Initialize packing with insertelements to start from poison.
2450       assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2451       Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF));
2452       VectorLoopValueMap.setVectorValue(V, Part, Poison);
2453       for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
2454         packScalarIntoVectorValue(V, VPIteration(Part, Lane));
2455       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2456     }
2457     Builder.restoreIP(OldIP);
2458     return VectorValue;
2459   }
2460 
2461   // If this scalar is unknown, assume that it is a constant or that it is
2462   // loop invariant. Broadcast V and save the value for future uses.
2463   Value *B = getBroadcastInstrs(V);
2464   VectorLoopValueMap.setVectorValue(V, Part, B);
2465   return B;
2466 }
2467 
2468 Value *
2469 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2470                                             const VPIteration &Instance) {
2471   // If the value is not an instruction contained in the loop, it should
2472   // already be scalar.
2473   if (OrigLoop->isLoopInvariant(V))
2474     return V;
2475 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2479 
2480   // If the value from the original loop has not been vectorized, it is
2481   // represented by UF x VF scalar values in the new loop. Return the requested
2482   // scalar value.
2483   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2484     return VectorLoopValueMap.getScalarValue(V, Instance);
2485 
2486   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2487   // for the given unroll part. If this entry is not a vector type (i.e., the
2488   // vectorization factor is one), there is no need to generate an
2489   // extractelement instruction.
2490   auto *U = getOrCreateVectorValue(V, Instance.Part);
2491   if (!U->getType()->isVectorTy()) {
2492     assert(VF.isScalar() && "Value not scalarized has non-vector type");
2493     return U;
2494   }
2495 
2496   // Otherwise, the value from the original loop has been vectorized and is
2497   // represented by UF vector values. Extract and return the requested scalar
2498   // value from the appropriate vector lane.
2499   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2500 }
2501 
2502 void InnerLoopVectorizer::packScalarIntoVectorValue(
2503     Value *V, const VPIteration &Instance) {
2504   assert(V != Induction && "The new induction variable should not be used.");
2505   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2506   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2507 
2508   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2509   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2510   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2511                                             Builder.getInt32(Instance.Lane));
2512   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2513 }
2514 
2515 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2516                                                     const VPIteration &Instance,
2517                                                     VPTransformState &State) {
2518   Value *ScalarInst = State.get(Def, Instance);
2519   Value *VectorValue = State.get(Def, Instance.Part);
2520   VectorValue = Builder.CreateInsertElement(
2521       VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
2522   State.set(Def, VectorValue, Instance.Part);
2523 }
2524 
2525 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2526   assert(Vec->getType()->isVectorTy() && "Invalid type");
2527   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
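  // E.g. for VF = 4 this builds the shuffle mask <3, 2, 1, 0>.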
2528   SmallVector<int, 8> ShuffleMask;
2529   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2530     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2531 
2532   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2533 }
2534 
2535 // Return whether we allow using masked interleave-groups (for dealing with
2536 // strided loads/stores that reside in predicated blocks, or for dealing
2537 // with gaps).
2538 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2539   // If an override option has been passed in for interleaved accesses, use it.
2540   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2541     return EnableMaskedInterleavedMemAccesses;
2542 
2543   return TTI.enableMaskedInterleavedAccessVectorization();
2544 }
2545 
2546 // Try to vectorize the interleave group that \p Instr belongs to.
2547 //
2548 // E.g. Translate following interleaved load group (factor = 3):
2549 //   for (i = 0; i < N; i+=3) {
2550 //     R = Pic[i];             // Member of index 0
2551 //     G = Pic[i+1];           // Member of index 1
2552 //     B = Pic[i+2];           // Member of index 2
2553 //     ... // do something to R, G, B
2554 //   }
2555 // To:
2556 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2557 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2558 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2559 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2560 //
2561 // Or translate following interleaved store group (factor = 3):
2562 //   for (i = 0; i < N; i+=3) {
2563 //     ... do something to R, G, B
2564 //     Pic[i]   = R;           // Member of index 0
2565 //     Pic[i+1] = G;           // Member of index 1
2566 //     Pic[i+2] = B;           // Member of index 2
2567 //   }
2568 // To:
2569 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2570 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2571 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2572 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2573 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2574 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2575     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2576     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2577     VPValue *BlockInMask) {
2578   Instruction *Instr = Group->getInsertPos();
2579   const DataLayout &DL = Instr->getModule()->getDataLayout();
2580 
  // Prepare the vector type for the interleaved load/store.
2582   Type *ScalarTy = getMemInstValueType(Instr);
2583   unsigned InterleaveFactor = Group->getFactor();
2584   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2585   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2586 
2587   // Prepare for the new pointers.
2588   SmallVector<Value *, 2> AddrParts;
2589   unsigned Index = Group->getIndex(Instr);
2590 
2591   // TODO: extend the masked interleaved-group support to reversed access.
2592   assert((!BlockInMask || !Group->isReverse()) &&
2593          "Reversed masked interleave-group not supported.");
2594 
2595   // If the group is reverse, adjust the index to refer to the last vector lane
2596   // instead of the first. We adjust the index from the first vector lane,
2597   // rather than directly getting the pointer for lane VF - 1, because the
2598   // pointer operand of the interleaved access is supposed to be uniform. For
2599   // uniform instructions, we're only required to generate a value for the
2600   // first vector lane in each unroll iteration.
2601   assert(!VF.isScalable() &&
2602          "scalable vector reverse operation is not implemented");
2603   if (Group->isReverse())
2604     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2605 
2606   for (unsigned Part = 0; Part < UF; Part++) {
2607     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2608     setDebugLocFromInst(Builder, AddrPart);
2609 
    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2621 
2622     bool InBounds = false;
2623     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2624       InBounds = gep->isInBounds();
2625     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2626     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2627 
2628     // Cast to the vector pointer type.
2629     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2630     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2631     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2632   }
2633 
2634   setDebugLocFromInst(Builder, Instr);
2635   Value *PoisonVec = PoisonValue::get(VecTy);
2636 
2637   Value *MaskForGaps = nullptr;
2638   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2639     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2640     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2641     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2642   }
2643 
2644   // Vectorize the interleaved load group.
2645   if (isa<LoadInst>(Instr)) {
2646     // For each unroll part, create a wide load for the group.
2647     SmallVector<Value *, 2> NewLoads;
2648     for (unsigned Part = 0; Part < UF; Part++) {
2649       Instruction *NewLoad;
2650       if (BlockInMask || MaskForGaps) {
2651         assert(useMaskedInterleavedAccesses(*TTI) &&
2652                "masked interleaved groups are not allowed.");
2653         Value *GroupMask = MaskForGaps;
2654         if (BlockInMask) {
2655           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2656           assert(!VF.isScalable() && "scalable vectors not yet supported.");
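          // E.g. (illustrative) for an interleave factor of 3 and VF = 4 the
          // replicated mask is <0,0,0,1,1,1,2,2,2,3,3,3>, so each lane of the
          // block mask guards all members of its tuple.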
2657           Value *ShuffledMask = Builder.CreateShuffleVector(
2658               BlockInMaskPart,
2659               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2660               "interleaved.mask");
2661           GroupMask = MaskForGaps
2662                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2663                                                 MaskForGaps)
2664                           : ShuffledMask;
2665         }
2666         NewLoad =
2667             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2668                                      GroupMask, PoisonVec, "wide.masked.vec");
2669       }
2670       else
2671         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2672                                             Group->getAlign(), "wide.vec");
2673       Group->addMetadata(NewLoad);
2674       NewLoads.push_back(NewLoad);
2675     }
2676 
2677     // For each member in the group, shuffle out the appropriate data from the
2678     // wide loads.
2679     unsigned J = 0;
2680     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2681       Instruction *Member = Group->getMember(I);
2682 
2683       // Skip the gaps in the group.
2684       if (!Member)
2685         continue;
2686 
2687       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2688       auto StrideMask =
2689           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2690       for (unsigned Part = 0; Part < UF; Part++) {
2691         Value *StridedVec = Builder.CreateShuffleVector(
2692             NewLoads[Part], StrideMask, "strided.vec");
2693 
        // If this member has a different type, cast the result type.
2695         if (Member->getType() != ScalarTy) {
2696           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2697           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2698           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2699         }
2700 
2701         if (Group->isReverse())
2702           StridedVec = reverseVector(StridedVec);
2703 
2704         State.set(VPDefs[J], Member, StridedVec, Part);
2705       }
2706       ++J;
2707     }
2708     return;
2709   }
2710 
2711   // The sub vector type for current instruction.
2712   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2713   auto *SubVT = VectorType::get(ScalarTy, VF);
2714 
2715   // Vectorize the interleaved store group.
2716   for (unsigned Part = 0; Part < UF; Part++) {
2717     // Collect the stored vector from each member.
2718     SmallVector<Value *, 4> StoredVecs;
2719     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store group doesn't allow a gap, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Fail to get a member from an interleaved store group");
2722 
2723       Value *StoredVec = State.get(StoredValues[i], Part);
2724 
2725       if (Group->isReverse())
2726         StoredVec = reverseVector(StoredVec);
2727 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2731         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2732 
2733       StoredVecs.push_back(StoredVec);
2734     }
2735 
2736     // Concatenate all vectors into a wide vector.
2737     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2738 
2739     // Interleave the elements in the wide vector.
2740     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2741     Value *IVec = Builder.CreateShuffleVector(
2742         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2743         "interleaved.vec");
2744 
2745     Instruction *NewStoreInstr;
2746     if (BlockInMask) {
2747       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2748       Value *ShuffledMask = Builder.CreateShuffleVector(
2749           BlockInMaskPart,
2750           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2751           "interleaved.mask");
2752       NewStoreInstr = Builder.CreateMaskedStore(
2753           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2754     }
2755     else
2756       NewStoreInstr =
2757           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2758 
2759     Group->addMetadata(NewStoreInstr);
2760   }
2761 }
2762 
2763 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2764     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2765     VPValue *StoredValue, VPValue *BlockInMask) {
2766   // Attempt to issue a wide load.
2767   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2768   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2769 
2770   assert((LI || SI) && "Invalid Load/Store instruction");
2771   assert((!SI || StoredValue) && "No stored value provided for widened store");
2772   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2773 
2774   LoopVectorizationCostModel::InstWidening Decision =
2775       Cost->getWideningDecision(Instr, VF);
2776   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2777           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2778           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2779          "CM decision is not to widen the memory instruction");
2780 
2781   Type *ScalarDataTy = getMemInstValueType(Instr);
2782 
2783   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2784   const Align Alignment = getLoadStoreAlignment(Instr);
2785 
2786   // Determine if the pointer operand of the access is either consecutive or
2787   // reverse consecutive.
2788   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2789   bool ConsecutiveStride =
2790       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2791   bool CreateGatherScatter =
2792       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2793 
2794   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2795   // gather/scatter. Otherwise Decision should have been to Scalarize.
2796   assert((ConsecutiveStride || CreateGatherScatter) &&
2797          "The instruction should be scalarized");
2798   (void)ConsecutiveStride;
2799 
2800   VectorParts BlockInMaskParts(UF);
2801   bool isMaskRequired = BlockInMask;
2802   if (isMaskRequired)
2803     for (unsigned Part = 0; Part < UF; ++Part)
2804       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2805 
2806   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2807     // Calculate the pointer for the specific unroll-part.
2808     GetElementPtrInst *PartPtr = nullptr;
2809 
2810     bool InBounds = false;
2811     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2812       InBounds = gep->isInBounds();
2813 
2814     if (Reverse) {
2815       assert(!VF.isScalable() &&
2816              "Reversing vectors is not yet supported for scalable vectors.");
2817 
2818       // If the address is consecutive but reversed, then the
2819       // wide store needs to start at the last vector element.
2820       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2821           ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
2822       PartPtr->setIsInBounds(InBounds);
2823       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2824           ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
2825       PartPtr->setIsInBounds(InBounds);
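      // E.g. (illustrative) with VF = 4 and Part = 1 the two GEPs above
      // compute Ptr - 4 - 3 = Ptr - 7, the lowest address touched by the
      // reversed wide access for this part.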
2826       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2827         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2828     } else {
2829       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2830       PartPtr = cast<GetElementPtrInst>(
2831           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2832       PartPtr->setIsInBounds(InBounds);
2833     }
2834 
2835     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2836     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2837   };
2838 
2839   // Handle Stores:
2840   if (SI) {
2841     setDebugLocFromInst(Builder, SI);
2842 
2843     for (unsigned Part = 0; Part < UF; ++Part) {
2844       Instruction *NewSI = nullptr;
2845       Value *StoredVal = State.get(StoredValue, Part);
2846       if (CreateGatherScatter) {
2847         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2848         Value *VectorGep = State.get(Addr, Part);
2849         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2850                                             MaskPart);
2851       } else {
2852         if (Reverse) {
2853           // If we store to reverse consecutive memory locations, then we need
2854           // to reverse the order of elements in the stored value.
2855           StoredVal = reverseVector(StoredVal);
2856           // We don't want to update the value in the map as it might be used in
2857           // another expression. So don't call resetVectorValue(StoredVal).
2858         }
2859         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2860         if (isMaskRequired)
2861           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2862                                             BlockInMaskParts[Part]);
2863         else
2864           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2865       }
2866       addMetadata(NewSI, SI);
2867     }
2868     return;
2869   }
2870 
2871   // Handle loads.
2872   assert(LI && "Must have a load instruction");
2873   setDebugLocFromInst(Builder, LI);
2874   for (unsigned Part = 0; Part < UF; ++Part) {
2875     Value *NewLI;
2876     if (CreateGatherScatter) {
2877       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2878       Value *VectorGep = State.get(Addr, Part);
2879       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2880                                          nullptr, "wide.masked.gather");
2881       addMetadata(NewLI, LI);
2882     } else {
2883       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2884       if (isMaskRequired)
2885         NewLI = Builder.CreateMaskedLoad(
2886             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2887             "wide.masked.load");
2888       else
2889         NewLI =
2890             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2891 
2892       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2893       addMetadata(NewLI, LI);
2894       if (Reverse)
2895         NewLI = reverseVector(NewLI);
2896     }
2897 
2898     State.set(Def, Instr, NewLI, Part);
2899   }
2900 }
2901 
2902 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2903                                                const VPIteration &Instance,
2904                                                bool IfPredicateInstr,
2905                                                VPTransformState &State) {
2906   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2907 
2908   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2909   // the first lane and part.
2910   if (isa<NoAliasScopeDeclInst>(Instr))
2911     if (!Instance.isFirstIteration())
2912       return;
2913 
2914   setDebugLocFromInst(Builder, Instr);
2915 
  // Does this instruction return a value?
2917   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2918 
2919   Instruction *Cloned = Instr->clone();
2920   if (!IsVoidRetTy)
2921     Cloned->setName(Instr->getName() + ".cloned");
2922 
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
2925   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2926     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2927     auto InputInstance = Instance;
2928     if (!Operand || !OrigLoop->contains(Operand) ||
2929         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2930       InputInstance.Lane = 0;
2931     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2932     Cloned->setOperand(op, NewOp);
2933   }
2934   addNewMetadata(Cloned, Instr);
2935 
2936   // Place the cloned scalar in the new loop.
2937   Builder.Insert(Cloned);
2938 
  // TODO: Set result for VPValue of VPReplicateRecipe. This requires
2940   // representing scalar values in VPTransformState. Add the cloned scalar to
2941   // the scalar map entry.
2942   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2943 
  // If we just cloned a new assumption, add it to the assumption cache.
2945   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2946     if (II->getIntrinsicID() == Intrinsic::assume)
2947       AC->registerAssumption(II);
2948 
2949   // End if-block.
2950   if (IfPredicateInstr)
2951     PredicatedInstructions.push_back(Cloned);
2952 }
2953 
2954 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2955                                                       Value *End, Value *Step,
2956                                                       Instruction *DL) {
2957   BasicBlock *Header = L->getHeader();
2958   BasicBlock *Latch = L->getLoopLatch();
2959   // As we're just creating this loop, it's possible no latch exists
2960   // yet. If so, use the header as this will be a single block loop.
2961   if (!Latch)
2962     Latch = Header;
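  // The generated skeleton is of the form (illustrative; block names depend
  // on the caller):
  //   %index = phi [ Start, preheader ], [ %index.next, latch ]
  //   ...
  //   %index.next = add %index, Step
  //   %cmp = icmp eq %index.next, End
  //   br i1 %cmp, label %exit, label %header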
2963 
2964   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2965   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2966   setDebugLocFromInst(Builder, OldInst);
2967   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2968 
2969   Builder.SetInsertPoint(Latch->getTerminator());
2970   setDebugLocFromInst(Builder, OldInst);
2971 
2972   // Create i+1 and fill the PHINode.
2973   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2974   Induction->addIncoming(Start, L->getLoopPreheader());
2975   Induction->addIncoming(Next, Latch);
2976   // Create the compare.
2977   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2978   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
2979 
2980   // Now we have two terminators. Remove the old one from the block.
2981   Latch->getTerminator()->eraseFromParent();
2982 
2983   return Induction;
2984 }
2985 
2986 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2987   if (TripCount)
2988     return TripCount;
2989 
2990   assert(L && "Create Trip Count for null loop.");
2991   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2992   // Find the loop boundaries.
2993   ScalarEvolution *SE = PSE.getSE();
2994   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2995   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2996          "Invalid loop count");
2997 
2998   Type *IdxTy = Legal->getWidestInductionType();
2999   assert(IdxTy && "No type for induction");
3000 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count here is if the
  // induction variable was signed and as such will not overflow. In that
  // case truncation is legal.
3006   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3007       IdxTy->getPrimitiveSizeInBits())
3008     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3009   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3010 
3011   // Get the total trip count from the count by adding 1.
3012   const SCEV *ExitCount = SE->getAddExpr(
3013       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3014 
3015   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3016 
3017   // Expand the trip count and place the new instructions in the preheader.
3018   // Notice that the pre-header does not change, only the loop body.
3019   SCEVExpander Exp(*SE, DL, "induction");
3020 
3021   // Count holds the overall loop count (N).
3022   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3023                                 L->getLoopPreheader()->getTerminator());
3024 
3025   if (TripCount->getType()->isPointerTy())
3026     TripCount =
3027         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3028                                     L->getLoopPreheader()->getTerminator());
3029 
3030   return TripCount;
3031 }
3032 
3033 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3034   if (VectorTripCount)
3035     return VectorTripCount;
3036 
3037   Value *TC = getOrCreateTripCount(L);
3038   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3039 
3040   Type *Ty = TC->getType();
  // The step is VF * UF. For scalable vectors it is computed at runtime.
3042   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3043 
3044   // If the tail is to be folded by masking, round the number of iterations N
3045   // up to a multiple of Step instead of rounding down. This is done by first
3046   // adding Step-1 and then rounding down. Note that it's ok if this addition
3047   // overflows: the vector induction variable will eventually wrap to zero given
3048   // that it starts at zero and its Step is a power of two; the loop will then
3049   // exit, with the last early-exit vector comparison also producing all-true.
3050   if (Cost->foldTailByMasking()) {
3051     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3052            "VF*UF must be a power of 2 when folding tail by masking");
3053     assert(!VF.isScalable() &&
3054            "Tail folding not yet supported for scalable vectors");
3055     TC = Builder.CreateAdd(
3056         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3057   }
3058 
3059   // Now we need to generate the expression for the part of the loop that the
3060   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3061   // iterations are not required for correctness, or N - Step, otherwise. Step
3062   // is equal to the vectorization factor (number of SIMD elements) times the
3063   // unroll factor (number of SIMD instructions).
3064   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
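  // E.g. (illustrative): with a trip count N = 10, VF = 4 and UF = 1, Step
  // is 4, n.mod.vf is 2 and the vector loop covers n.vec = 8 iterations of
  // the original loop; the remaining 2 run in the scalar remainder loop.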
3065 
3066   // There are two cases where we need to ensure (at least) the last iteration
3067   // runs in the scalar remainder loop. Thus, if the step evenly divides
3068   // the trip count, we set the remainder to be equal to the step. If the step
3069   // does not evenly divide the trip count, no adjustment is necessary since
3070   // there will already be scalar iterations. Note that the minimum iterations
3071   // check ensures that N >= Step. The cases are:
3072   // 1) If there is a non-reversed interleaved group that may speculatively
3073   //    access memory out-of-bounds.
3074   // 2) If any instruction may follow a conditionally taken exit. That is, if
3075   //    the loop contains multiple exiting blocks, or a single exiting block
3076   //    which is not the latch.
3077   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3078     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3079     R = Builder.CreateSelect(IsZero, Step, R);
3080   }
3081 
3082   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3083 
3084   return VectorTripCount;
3085 }
3086 
3087 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3088                                                    const DataLayout &DL) {
3089   // Verify that V is a vector type with same number of elements as DstVTy.
3090   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3091   unsigned VF = DstFVTy->getNumElements();
3092   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3094   Type *SrcElemTy = SrcVecTy->getElementType();
3095   Type *DstElemTy = DstFVTy->getElementType();
3096   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3097          "Vector elements must have same size");
3098 
3099   // Do a direct cast if element types are castable.
3100   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3101     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3102   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this using a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
3107   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3108          "Only one type should be a pointer type");
3109   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3110          "Only one type should be a floating point type");
3111   Type *IntTy =
3112       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3113   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3114   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3115   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3116 }
3117 
3118 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3119                                                          BasicBlock *Bypass) {
3120   Value *Count = getOrCreateTripCount(L);
3121   // Reuse existing vector loop preheader for TC checks.
3122   // Note that new preheader block is generated for vector loop.
3123   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3124   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3125 
3126   // Generate code to check if the loop's trip count is less than VF * UF, or
3127   // equal to it in case a scalar epilogue is required; this implies that the
3128   // vector trip count is zero. This check also covers the case where adding one
3129   // to the backedge-taken count overflowed leading to an incorrect trip count
3130   // of zero. In this case we will also jump to the scalar loop.
3131   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3132                                           : ICmpInst::ICMP_ULT;
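  // E.g. (illustrative, assuming the tail is not folded): with VF = 4 and
  // UF = 2 the vector loop is bypassed whenever the trip count is below 8,
  // or equal to 8 when a scalar epilogue is required.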
3133 
3134   // If tail is to be folded, vector loop takes care of all iterations.
3135   Value *CheckMinIters = Builder.getFalse();
3136   if (!Cost->foldTailByMasking()) {
3137     Value *Step =
3138         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3139     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3140   }
3141   // Create new preheader for vector loop.
3142   LoopVectorPreHeader =
3143       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3144                  "vector.ph");
3145 
3146   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3147                                DT->getNode(Bypass)->getIDom()) &&
3148          "TC check is expected to dominate Bypass");
3149 
3150   // Update dominator for Bypass & LoopExit.
3151   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3152   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3153 
3154   ReplaceInstWithInst(
3155       TCCheckBlock->getTerminator(),
3156       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3157   LoopBypassBlocks.push_back(TCCheckBlock);
3158 }
3159 
3160 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3161   // Reuse existing vector loop preheader for SCEV checks.
3162   // Note that new preheader block is generated for vector loop.
3163   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
3164 
  // Generate the code to check the SCEV assumptions that we made.
3166   // We want the new basic block to start at the first instruction in a
3167   // sequence of instructions that form a check.
3168   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3169                    "scev.check");
3170   Value *SCEVCheck = Exp.expandCodeForPredicate(
3171       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
3172 
3173   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3174     if (C->isZero())
3175       return;
3176 
3177   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3178            (OptForSizeBasedOnProfile &&
3179             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3180          "Cannot SCEV check stride or overflow when optimizing for size");
3181 
3182   SCEVCheckBlock->setName("vector.scevcheck");
3183   // Create new preheader for vector loop.
3184   LoopVectorPreHeader =
3185       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
3186                  nullptr, "vector.ph");
3187 
3188   // Update dominator only if this is first RT check.
3189   if (LoopBypassBlocks.empty()) {
3190     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3191     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3192   }
3193 
3194   ReplaceInstWithInst(
3195       SCEVCheckBlock->getTerminator(),
3196       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
3197   LoopBypassBlocks.push_back(SCEVCheckBlock);
3198   AddedSafetyChecks = true;
3199 }
3200 
3201 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3202   // VPlan-native path does not do any analysis for runtime checks currently.
3203   if (EnableVPlanNativePath)
3204     return;
3205 
3206   // Reuse existing vector loop preheader for runtime memory checks.
3207   // Note that new preheader block is generated for vector loop.
3208   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
3209 
3210   // Generate the code that checks in runtime if arrays overlap. We put the
3211   // checks into a separate block to make the more common case of few elements
3212   // faster.
3213   auto *LAI = Legal->getLAI();
3214   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
3215   if (!RtPtrChecking.Need)
3216     return;
3217 
3218   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3219     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3220            "Cannot emit memory checks when optimizing for size, unless forced "
3221            "to vectorize.");
3222     ORE->emit([&]() {
3223       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3224                                         L->getStartLoc(), L->getHeader())
3225              << "Code-size may be reduced by not forcing "
3226                 "vectorization, or by source-code modifications "
3227                 "eliminating the need for runtime checks "
3228                 "(e.g., adding 'restrict').";
3229     });
3230   }
3231 
3232   MemCheckBlock->setName("vector.memcheck");
3233   // Create new preheader for vector loop.
3234   LoopVectorPreHeader =
3235       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
3236                  "vector.ph");
3237 
  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  auto *CondBranch = cast<BranchInst>(
      Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
  ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);
  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;
3249 
3250   Instruction *FirstCheckInst;
3251   Instruction *MemRuntimeCheck;
3252   SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(),
3253                    "induction");
3254   std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks(
3255       MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp);
3256   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
3257                             "claimed checks are required");
3258   CondBranch->setCondition(MemRuntimeCheck);
3259 
3260   // We currently don't use LoopVersioning for the actual loop cloning but we
3261   // still use it to add the noalias metadata.
3262   LVer = std::make_unique<LoopVersioning>(
3263       *Legal->getLAI(),
3264       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3265       DT, PSE.getSE());
3266   LVer->prepareNoAliasMetadata();
3267 }
3268 
3269 Value *InnerLoopVectorizer::emitTransformedIndex(
3270     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3271     const InductionDescriptor &ID) const {
3272 
3273   SCEVExpander Exp(*SE, DL, "induction");
3274   auto Step = ID.getStep();
3275   auto StartValue = ID.getStartValue();
3276   assert(Index->getType() == Step->getType() &&
3277          "Index type does not match StepValue type");
3278 
3279   // Note: the IR at this point is broken. We cannot use SE to create any new
3280   // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
3285   auto CreateAdd = [&B](Value *X, Value *Y) {
3286     assert(X->getType() == Y->getType() && "Types don't match!");
3287     if (auto *CX = dyn_cast<ConstantInt>(X))
3288       if (CX->isZero())
3289         return Y;
3290     if (auto *CY = dyn_cast<ConstantInt>(Y))
3291       if (CY->isZero())
3292         return X;
3293     return B.CreateAdd(X, Y);
3294   };
3295 
3296   auto CreateMul = [&B](Value *X, Value *Y) {
3297     assert(X->getType() == Y->getType() && "Types don't match!");
3298     if (auto *CX = dyn_cast<ConstantInt>(X))
3299       if (CX->isOne())
3300         return Y;
3301     if (auto *CY = dyn_cast<ConstantInt>(Y))
3302       if (CY->isOne())
3303         return X;
3304     return B.CreateMul(X, Y);
3305   };
3306 
3307   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3308   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3309   // the DomTree is not kept up-to-date for additional blocks generated in the
3310   // vector loop. By using the header as insertion point, we guarantee that the
3311   // expanded instructions dominate all their uses.
3312   auto GetInsertPoint = [this, &B]() {
3313     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3314     if (InsertBB != LoopVectorBody &&
3315         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3316       return LoopVectorBody->getTerminator();
3317     return &*B.GetInsertPoint();
3318   };
3319   switch (ID.getKind()) {
3320   case InductionDescriptor::IK_IntInduction: {
3321     assert(Index->getType() == StartValue->getType() &&
3322            "Index type does not match StartValue type");
3323     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3324       return B.CreateSub(StartValue, Index);
3325     auto *Offset = CreateMul(
3326         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3327     return CreateAdd(StartValue, Offset);
3328   }
3329   case InductionDescriptor::IK_PtrInduction: {
3330     assert(isa<SCEVConstant>(Step) &&
3331            "Expected constant step for pointer induction");
3332     return B.CreateGEP(
3333         StartValue->getType()->getPointerElementType(), StartValue,
3334         CreateMul(Index,
3335                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3336   }
3337   case InductionDescriptor::IK_FpInduction: {
3338     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3339     auto InductionBinOp = ID.getInductionBinOp();
3340     assert(InductionBinOp &&
3341            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3342             InductionBinOp->getOpcode() == Instruction::FSub) &&
3343            "Original bin op should be defined for FP induction");
3344 
3345     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3346 
3347     // Floating point operations had to be 'fast' to enable the induction.
3348     FastMathFlags Flags;
3349     Flags.setFast();
3350 
3351     Value *MulExp = B.CreateFMul(StepValue, Index);
3352     if (isa<Instruction>(MulExp))
3353       // We have to check, the MulExp may be a constant.
3354       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
3355 
3356     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3357                                "induction");
3358     if (isa<Instruction>(BOp))
3359       cast<Instruction>(BOp)->setFastMathFlags(Flags);
3360 
3361     return BOp;
3362   }
3363   case InductionDescriptor::IK_NoInduction:
3364     return nullptr;
3365   }
3366   llvm_unreachable("invalid enum");
3367 }
3368 
3369 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3370   LoopScalarBody = OrigLoop->getHeader();
3371   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3372   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3373   assert(LoopExitBlock && "Must have an exit block");
3374   assert(LoopVectorPreHeader && "Invalid loop structure");
3375 
3376   LoopMiddleBlock =
3377       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3378                  LI, nullptr, Twine(Prefix) + "middle.block");
3379   LoopScalarPreHeader =
3380       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3381                  nullptr, Twine(Prefix) + "scalar.ph");
3382 
3383   // Set up branch from middle block to the exit and scalar preheader blocks.
3384   // completeLoopSkeleton will update the condition to use an iteration check,
3385   // if required to decide whether to execute the remainder.
3386   BranchInst *BrInst =
3387       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3388   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3389   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3390   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3391 
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the right place a few lines later.
3395   LoopVectorBody =
3396       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3397                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3398 
3399   // Update dominator for loop exit.
3400   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3401 
3402   // Create and register the new vector loop.
3403   Loop *Lp = LI->AllocateLoop();
3404   Loop *ParentLoop = OrigLoop->getParentLoop();
3405 
3406   // Insert the new loop into the loop nest and register the new basic blocks
3407   // before calling any utilities such as SCEV that require valid LoopInfo.
3408   if (ParentLoop) {
3409     ParentLoop->addChildLoop(Lp);
3410   } else {
3411     LI->addTopLevelLoop(Lp);
3412   }
3413   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3414   return Lp;
3415 }
3416 
3417 void InnerLoopVectorizer::createInductionResumeValues(
3418     Loop *L, Value *VectorTripCount,
3419     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3420   assert(VectorTripCount && L && "Expected valid arguments");
3421   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3422           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3423          "Inconsistent information about additional bypass.");
3424   // We are going to resume the execution of the scalar loop.
3425   // Go over all of the induction variables that we found and fix the
3426   // PHIs that are left in the scalar version of the loop.
3427   // The starting values of PHI nodes depend on the counter of the last
3428   // iteration in the vectorized loop.
3429   // If we come from a bypass edge then we need to start from the original
3430   // start value.
3431   for (auto &InductionEntry : Legal->getInductionVars()) {
3432     PHINode *OrigPhi = InductionEntry.first;
3433     InductionDescriptor II = InductionEntry.second;
3434 
    // Create phi nodes to merge from the backedge-taken check block.
3436     PHINode *BCResumeVal =
3437         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3438                         LoopScalarPreHeader->getTerminator());
3439     // Copy original phi DL over to the new one.
3440     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3441     Value *&EndValue = IVEndValues[OrigPhi];
3442     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3443     if (OrigPhi == OldInduction) {
3444       // We know what the end value is.
3445       EndValue = VectorTripCount;
3446     } else {
3447       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3448       Type *StepType = II.getStep()->getType();
3449       Instruction::CastOps CastOp =
3450           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3451       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3452       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3453       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3454       EndValue->setName("ind.end");
3455 
3456       // Compute the end value for the additional bypass (if applicable).
3457       if (AdditionalBypass.first) {
3458         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3459         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3460                                          StepType, true);
3461         CRD =
3462             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3463         EndValueFromAdditionalBypass =
3464             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3465         EndValueFromAdditionalBypass->setName("ind.end");
3466       }
3467     }
3468     // The new PHI merges the original incoming value, in case of a bypass,
3469     // or the value at the end of the vectorized loop.
3470     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3471 
3472     // Fix the scalar body counter (PHI node).
3473     // The old induction's phi node in the scalar body needs the truncated
3474     // value.
3475     for (BasicBlock *BB : LoopBypassBlocks)
3476       BCResumeVal->addIncoming(II.getStartValue(), BB);
3477 
3478     if (AdditionalBypass.first)
3479       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3480                                             EndValueFromAdditionalBypass);
3481 
3482     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3483   }
3484 }
3485 
3486 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3487                                                       MDNode *OrigLoopID) {
3488   assert(L && "Expected valid loop.");
3489 
3490   // The trip counts should be cached by now.
3491   Value *Count = getOrCreateTripCount(L);
3492   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3493 
3494   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3495 
3496   // Add a check in the middle block to see if we have completed
3497   // all of the iterations in the first vector loop.
3498   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3499   // If tail is to be folded, we know we don't need to run the remainder.
3500   if (!Cost->foldTailByMasking()) {
3501     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3502                                         Count, VectorTripCount, "cmp.n",
3503                                         LoopMiddleBlock->getTerminator());
3504 
3505     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3506     // of the corresponding compare because they may have ended up with
3507     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has got a line number inside the loop.
3509     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3510     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3511   }
3512 
3513   // Get ready to start creating new instructions into the vectorized body.
3514   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3515          "Inconsistent vector loop preheader");
3516   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3517 
3518   Optional<MDNode *> VectorizedLoopID =
3519       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3520                                       LLVMLoopVectorizeFollowupVectorized});
3521   if (VectorizedLoopID.hasValue()) {
3522     L->setLoopID(VectorizedLoopID.getValue());
3523 
3524     // Do not setAlreadyVectorized if loop attributes have been defined
3525     // explicitly.
3526     return LoopVectorPreHeader;
3527   }
3528 
3529   // Keep all loop hints from the original loop on the vector loop (we'll
3530   // replace the vectorizer-specific hints below).
3531   if (MDNode *LID = OrigLoop->getLoopID())
3532     L->setLoopID(LID);
3533 
3534   LoopVectorizeHints Hints(L, true, *ORE);
3535   Hints.setAlreadyVectorized();
3536 
3537 #ifdef EXPENSIVE_CHECKS
3538   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3539   LI->verify(*DT);
3540 #endif
3541 
3542   return LoopVectorPreHeader;
3543 }
3544 
3545 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3546   /*
3547    In this function we generate a new loop. The new loop will contain
3548    the vectorized instructions while the old loop will continue to run the
3549    scalar remainder.
3550 
3551        [ ] <-- loop iteration number check.
3552     /   |
3553    /    v
3554   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3555   |  /  |
3556   | /   v
3557   ||   [ ]     <-- vector pre header.
3558   |/    |
3559   |     v
3560   |    [  ] \
3561   |    [  ]_|   <-- vector loop.
3562   |     |
3563   |     v
3564   |   -[ ]   <--- middle-block.
3565   |  /  |
3566   | /   v
3567   -|- >[ ]     <--- new preheader.
3568    |    |
3569    |    v
3570    |   [ ] \
3571    |   [ ]_|   <-- old scalar loop to handle remainder.
3572     \   |
3573      \  v
3574       >[ ]     <-- exit block.
3575    ...
3576    */
3577 
3578   // Get the metadata of the original loop before it gets modified.
3579   MDNode *OrigLoopID = OrigLoop->getLoopID();
3580 
3581   // Create an empty vector loop, and prepare basic blocks for the runtime
3582   // checks.
3583   Loop *Lp = createVectorLoopSkeleton("");
3584 
3585   // Now, compare the new count to zero. If it is zero skip the vector loop and
3586   // jump to the scalar loop. This check also covers the case where the
3587   // backedge-taken count is uint##_max: adding one to it will overflow leading
3588   // to an incorrect trip count of zero. In this (rare) case we will also jump
3589   // to the scalar loop.
3590   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3591 
3592   // Generate the code to check any assumptions that we've made for SCEV
3593   // expressions.
3594   emitSCEVChecks(Lp, LoopScalarPreHeader);
3595 
3596   // Generate the code that checks in runtime if arrays overlap. We put the
3597   // checks into a separate block to make the more common case of few elements
3598   // faster.
3599   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3600 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
3605   //
3606   // We try to obtain an induction variable from the original loop as hard
3607   // as possible. However if we don't find one that:
3608   //   - is an integer
3609   //   - counts from zero, stepping by one
3610   //   - is the size of the widest induction variable type
3611   // then we create a new one.
3612   OldInduction = Legal->getPrimaryInduction();
3613   Type *IdxTy = Legal->getWidestInductionType();
3614   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3615   // The loop step is equal to the vectorization factor (num of SIMD elements)
3616   // times the unroll factor (num of SIMD instructions).
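  // E.g. for a fixed VF of 4 and UF of 2, the induction is advanced by 8 on
  // each iteration of the vector loop.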
3617   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3618   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3619   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3620   Induction =
3621       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3622                               getDebugLocFromInstOrOperands(OldInduction));
3623 
3624   // Emit phis for the new starting index of the scalar loop.
3625   createInductionResumeValues(Lp, CountRoundDown);
3626 
3627   return completeLoopSkeleton(Lp, OrigLoopID);
3628 }
3629 
3630 // Fix up external users of the induction variable. At this point, we are
3631 // in LCSSA form, with all external PHIs that use the IV having one input value,
3632 // coming from the remainder loop. We need those PHIs to also have a correct
3633 // value for the IV when arriving directly from the middle block.
3634 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3635                                        const InductionDescriptor &II,
3636                                        Value *CountRoundDown, Value *EndValue,
3637                                        BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
3642 
3643   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3644 
3645   DenseMap<Value *, Value *> MissingVals;
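  // Maps each out-of-loop user (an LCSSA phi) to the value it should receive
  // along the edge from the middle block.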
3646 
3647   // An external user of the last iteration's value should see the value that
3648   // the remainder loop uses to initialize its own IV.
3649   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3650   for (User *U : PostInc->users()) {
3651     Instruction *UI = cast<Instruction>(U);
3652     if (!OrigLoop->contains(UI)) {
3653       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3654       MissingVals[UI] = EndValue;
3655     }
3656   }
3657 
  // An external user of the penultimate value needs to see EndValue - Step.
3659   // The simplest way to get this is to recompute it from the constituent SCEVs,
3660   // that is Start + (Step * (CRD - 1)).
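  // E.g. for a canonical induction starting at 0 with step 1, this is simply
  // CRD - 1.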
3661   for (User *U : OrigPhi->users()) {
3662     auto *UI = cast<Instruction>(U);
3663     if (!OrigLoop->contains(UI)) {
3664       const DataLayout &DL =
3665           OrigLoop->getHeader()->getModule()->getDataLayout();
3666       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3667 
3668       IRBuilder<> B(MiddleBlock->getTerminator());
3669       Value *CountMinusOne = B.CreateSub(
3670           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3671       Value *CMO =
3672           !II.getStep()->getType()->isIntegerTy()
3673               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3674                              II.getStep()->getType())
3675               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3676       CMO->setName("cast.cmo");
3677       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3678       Escape->setName("ind.escape");
3679       MissingVals[UI] = Escape;
3680     }
3681   }
3682 
3683   for (auto &I : MissingVals) {
3684     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3686     // that is %IV2 = phi [...], [ %IV1, %latch ]
3687     // In this case, if IV1 has an external use, we need to avoid adding both
3688     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3689     // don't already have an incoming value for the middle block.
3690     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3691       PHI->addIncoming(I.second, MiddleBlock);
3692   }
3693 }
3694 
3695 namespace {
3696 
3697 struct CSEDenseMapInfo {
3698   static bool canHandle(const Instruction *I) {
3699     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3700            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3701   }
3702 
3703   static inline Instruction *getEmptyKey() {
3704     return DenseMapInfo<Instruction *>::getEmptyKey();
3705   }
3706 
3707   static inline Instruction *getTombstoneKey() {
3708     return DenseMapInfo<Instruction *>::getTombstoneKey();
3709   }
3710 
3711   static unsigned getHashValue(const Instruction *I) {
3712     assert(canHandle(I) && "Unknown instruction!");
3713     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3714                                                            I->value_op_end()));
3715   }
3716 
3717   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3718     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3719         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3720       return LHS == RHS;
3721     return LHS->isIdenticalTo(RHS);
3722   }
3723 };
3724 
3725 } // end anonymous namespace
3726 
/// Perform CSE of induction variable instructions.
3728 static void cse(BasicBlock *BB) {
3729   // Perform simple cse.
3730   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3731   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3732     Instruction *In = &*I++;
3733 
3734     if (!CSEDenseMapInfo::canHandle(In))
3735       continue;
3736 
3737     // Check if we can replace this instruction with any of the
3738     // visited instructions.
3739     if (Instruction *V = CSEMap.lookup(In)) {
3740       In->replaceAllUsesWith(V);
3741       In->eraseFromParent();
3742       continue;
3743     }
3744 
3745     CSEMap[In] = In;
3746   }
3747 }
3748 
3749 InstructionCost
3750 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3751                                               bool &NeedToScalarize) {
3752   assert(!VF.isScalable() && "scalable vectors not yet supported.");
3753   Function *F = CI->getCalledFunction();
3754   Type *ScalarRetTy = CI->getType();
3755   SmallVector<Type *, 4> Tys, ScalarTys;
3756   for (auto &ArgOp : CI->arg_operands())
3757     ScalarTys.push_back(ArgOp->getType());
3758 
3759   // Estimate cost of scalarized vector call. The source operands are assumed
3760   // to be vectors, so we need to extract individual elements from there,
3761   // execute VF scalar calls, and then gather the result into the vector return
3762   // value.
3763   InstructionCost ScalarCallCost =
3764       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3765   if (VF.isScalar())
3766     return ScalarCallCost;
3767 
3768   // Compute corresponding vector type for return value and arguments.
3769   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3770   for (Type *ScalarTy : ScalarTys)
3771     Tys.push_back(ToVectorTy(ScalarTy, VF));
3772 
3773   // Compute costs of unpacking argument values for the scalar calls and
3774   // packing the return values to a vector.
3775   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3776 
3777   InstructionCost Cost =
3778       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
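  // E.g. for VF = 4 this covers four scalar calls plus the cost of extracting
  // the arguments and inserting the results computed above.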
3779 
3780   // If we can't emit a vector call for this function, then the currently found
3781   // cost is the cost we need to return.
3782   NeedToScalarize = true;
3783   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3784   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3785 
3786   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3787     return Cost;
3788 
3789   // If the corresponding vector cost is cheaper, return its cost.
3790   InstructionCost VectorCallCost =
3791       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3792   if (VectorCallCost < Cost) {
3793     NeedToScalarize = false;
3794     Cost = VectorCallCost;
3795   }
3796   return Cost;
3797 }
3798 
3799 InstructionCost
3800 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3801                                                    ElementCount VF) {
3802   auto MaybeVectorizeType = [](Type *Elt, ElementCount VF) -> Type * {
3803     if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3804       return Elt;
3805     return VectorType::get(Elt, VF);
3806   };
3807 
3808   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3809   assert(ID && "Expected intrinsic call!");
3810   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3811   FastMathFlags FMF;
3812   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3813     FMF = FPMO->getFastMathFlags();
3814 
3815   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3816   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3817   SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3820 
3821   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3822                                     dyn_cast<IntrinsicInst>(CI));
3823   return TTI.getIntrinsicInstrCost(CostAttrs,
3824                                    TargetTransformInfo::TCK_RecipThroughput);
3825 }
3826 
3827 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3828   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3829   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3830   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3831 }
3832 
3833 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3834   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3835   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3836   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3837 }
3838 
3839 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3840   // For every instruction `I` in MinBWs, truncate the operands, create a
3841   // truncated version of `I` and reextend its result. InstCombine runs
3842   // later and will remove any ext/trunc pairs.
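  //
  // E.g. a <4 x i32> add whose result is known to need only 8 bits becomes a
  // <4 x i8> add on truncated operands, and its result is zero-extended back
  // to <4 x i32>.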
3843   SmallPtrSet<Value *, 4> Erased;
3844   for (const auto &KV : Cost->getMinimalBitwidths()) {
3845     // If the value wasn't vectorized, we must maintain the original scalar
3846     // type. The absence of the value from VectorLoopValueMap indicates that it
3847     // wasn't vectorized.
3848     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3849       continue;
3850     for (unsigned Part = 0; Part < UF; ++Part) {
3851       Value *I = getOrCreateVectorValue(KV.first, Part);
3852       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3853         continue;
3854       Type *OriginalTy = I->getType();
3855       Type *ScalarTruncatedTy =
3856           IntegerType::get(OriginalTy->getContext(), KV.second);
3857       auto *TruncatedTy = FixedVectorType::get(
3858           ScalarTruncatedTy,
3859           cast<FixedVectorType>(OriginalTy)->getNumElements());
3860       if (TruncatedTy == OriginalTy)
3861         continue;
3862 
3863       IRBuilder<> B(cast<Instruction>(I));
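      // Narrow an operand to the truncated type. If the operand is a zext
      // from exactly that type, reuse the zext's source instead of creating a
      // new cast.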
3864       auto ShrinkOperand = [&](Value *V) -> Value * {
3865         if (auto *ZI = dyn_cast<ZExtInst>(V))
3866           if (ZI->getSrcTy() == TruncatedTy)
3867             return ZI->getOperand(0);
3868         return B.CreateZExtOrTrunc(V, TruncatedTy);
3869       };
3870 
3871       // The actual instruction modification depends on the instruction type,
3872       // unfortunately.
3873       Value *NewI = nullptr;
3874       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3875         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3876                              ShrinkOperand(BO->getOperand(1)));
3877 
3878         // Any wrapping introduced by shrinking this operation shouldn't be
3879         // considered undefined behavior. So, we can't unconditionally copy
3880         // arithmetic wrapping flags to NewI.
3881         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3882       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3883         NewI =
3884             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3885                          ShrinkOperand(CI->getOperand(1)));
3886       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3887         NewI = B.CreateSelect(SI->getCondition(),
3888                               ShrinkOperand(SI->getTrueValue()),
3889                               ShrinkOperand(SI->getFalseValue()));
3890       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3891         switch (CI->getOpcode()) {
3892         default:
3893           llvm_unreachable("Unhandled cast!");
3894         case Instruction::Trunc:
3895           NewI = ShrinkOperand(CI->getOperand(0));
3896           break;
3897         case Instruction::SExt:
3898           NewI = B.CreateSExtOrTrunc(
3899               CI->getOperand(0),
3900               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3901           break;
3902         case Instruction::ZExt:
3903           NewI = B.CreateZExtOrTrunc(
3904               CI->getOperand(0),
3905               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3906           break;
3907         }
3908       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3909         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3910                              ->getNumElements();
3911         auto *O0 = B.CreateZExtOrTrunc(
3912             SI->getOperand(0),
3913             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3914         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3915                              ->getNumElements();
3916         auto *O1 = B.CreateZExtOrTrunc(
3917             SI->getOperand(1),
3918             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3919 
3920         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3921       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3922         // Don't do anything with the operands, just extend the result.
3923         continue;
3924       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3925         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3926                             ->getNumElements();
3927         auto *O0 = B.CreateZExtOrTrunc(
3928             IE->getOperand(0),
3929             FixedVectorType::get(ScalarTruncatedTy, Elements));
3930         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3931         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3932       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3933         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3934                             ->getNumElements();
3935         auto *O0 = B.CreateZExtOrTrunc(
3936             EE->getOperand(0),
3937             FixedVectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(1));
3939       } else {
3940         // If we don't know what to do, be conservative and don't do anything.
3941         continue;
3942       }
3943 
3944       // Lastly, extend the result.
3945       NewI->takeName(cast<Instruction>(I));
3946       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3947       I->replaceAllUsesWith(Res);
3948       cast<Instruction>(I)->eraseFromParent();
3949       Erased.insert(I);
3950       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3951     }
3952   }
3953 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3955   for (const auto &KV : Cost->getMinimalBitwidths()) {
3956     // If the value wasn't vectorized, we must maintain the original scalar
3957     // type. The absence of the value from VectorLoopValueMap indicates that it
3958     // wasn't vectorized.
3959     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3960       continue;
3961     for (unsigned Part = 0; Part < UF; ++Part) {
3962       Value *I = getOrCreateVectorValue(KV.first, Part);
3963       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3964       if (Inst && Inst->use_empty()) {
3965         Value *NewI = Inst->getOperand(0);
3966         Inst->eraseFromParent();
3967         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3968       }
3969     }
3970   }
3971 }
3972 
3973 void InnerLoopVectorizer::fixVectorizedLoop() {
3974   // Insert truncates and extends for any truncated instructions as hints to
3975   // InstCombine.
3976   if (VF.isVector())
3977     truncateToMinimalBitwidths();
3978 
3979   // Fix widened non-induction PHIs by setting up the PHI operands.
3980   if (OrigPHIsToFix.size()) {
3981     assert(EnableVPlanNativePath &&
3982            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3983     fixNonInductionPHIs();
3984   }
3985 
3986   // At this point every instruction in the original loop is widened to a
3987   // vector form. Now we need to fix the recurrences in the loop. These PHI
3988   // nodes are currently empty because we did not want to introduce cycles.
3989   // This is the second stage of vectorizing recurrences.
3990   fixCrossIterationPHIs();
3991 
3992   // Forget the original basic block.
3993   PSE.getSE()->forgetLoop(OrigLoop);
3994 
3995   // Fix-up external users of the induction variables.
3996   for (auto &Entry : Legal->getInductionVars())
3997     fixupIVUsers(Entry.first, Entry.second,
3998                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3999                  IVEndValues[Entry.first], LoopMiddleBlock);
4000 
4001   fixLCSSAPHIs();
4002   for (Instruction *PI : PredicatedInstructions)
4003     sinkScalarOperands(&*PI);
4004 
4005   // Remove redundant induction instructions.
4006   cse(LoopVectorBody);
4007 
4008   // Set/update profile weights for the vector and remainder loops as original
4009   // loop iterations are now distributed among them. Note that original loop
4010   // represented by LoopScalarBody becomes remainder loop after vectorization.
4011   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
4017   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4021   setProfileInfoAfterUnrolling(
4022       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4023       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4024 }
4025 
4026 void InnerLoopVectorizer::fixCrossIterationPHIs() {
4027   // In order to support recurrences we need to be able to vectorize Phi nodes.
4028   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4029   // stage #2: We now need to fix the recurrences by adding incoming edges to
4030   // the currently empty PHI nodes. At this point every instruction in the
4031   // original loop is widened to a vector form so we can use them to construct
4032   // the incoming edges.
4033   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4034     // Handle first-order recurrences and reductions that need to be fixed.
4035     if (Legal->isFirstOrderRecurrence(&Phi))
4036       fixFirstOrderRecurrence(&Phi);
4037     else if (Legal->isReductionVariable(&Phi))
4038       fixReduction(&Phi);
4039   }
4040 }
4041 
4042 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4043   // This is the second phase of vectorizing first-order recurrences. An
4044   // overview of the transformation is described below. Suppose we have the
4045   // following loop.
4046   //
4047   //   for (int i = 0; i < n; ++i)
4048   //     b[i] = a[i] - a[i - 1];
4049   //
4050   // There is a first-order recurrence on "a". For this loop, the shorthand
4051   // scalar IR looks like:
4052   //
4053   //   scalar.ph:
4054   //     s_init = a[-1]
4055   //     br scalar.body
4056   //
4057   //   scalar.body:
4058   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4059   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4060   //     s2 = a[i]
4061   //     b[i] = s2 - s1
4062   //     br cond, scalar.body, ...
4063   //
  // In this example, s1 is a recurrence because its value depends on the
4065   // previous iteration. In the first phase of vectorization, we created a
4066   // temporary value for s1. We now complete the vectorization and produce the
4067   // shorthand vector IR shown below (for VF = 4, UF = 1).
4068   //
4069   //   vector.ph:
4070   //     v_init = vector(..., ..., ..., a[-1])
4071   //     br vector.body
4072   //
4073   //   vector.body
4074   //     i = phi [0, vector.ph], [i+4, vector.body]
4075   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4076   //     v2 = a[i, i+1, i+2, i+3];
4077   //     v3 = vector(v1(3), v2(0, 1, 2))
4078   //     b[i, i+1, i+2, i+3] = v2 - v3
4079   //     br cond, vector.body, middle.block
4080   //
4081   //   middle.block:
4082   //     x = v2(3)
4083   //     br scalar.ph
4084   //
4085   //   scalar.ph:
4086   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4087   //     br scalar.body
4088   //
  // After the vector loop completes execution, we extract the next value of
4090   // the recurrence (x) to use as the initial value in the scalar loop.
4091 
4092   // Get the original loop preheader and single loop latch.
4093   auto *Preheader = OrigLoop->getLoopPreheader();
4094   auto *Latch = OrigLoop->getLoopLatch();
4095 
4096   // Get the initial and previous values of the scalar recurrence.
4097   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4098   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4099 
4100   // Create a vector from the initial value.
4101   auto *VectorInit = ScalarInit;
4102   if (VF.isVector()) {
4103     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4104     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
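    // Insert the scalar initial value into the last lane; the shuffles below
    // splice it in ahead of the values computed in the current vector
    // iteration.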
4105     VectorInit = Builder.CreateInsertElement(
4106         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4107         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4108   }
4109 
4110   // We constructed a temporary phi node in the first phase of vectorization.
4111   // This phi node will eventually be deleted.
4112   Builder.SetInsertPoint(
4113       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4114 
4115   // Create a phi node for the new recurrence. The current value will either be
4116   // the initial value inserted into a vector or loop-varying vector value.
4117   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4118   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4119 
4120   // Get the vectorized previous value of the last part UF - 1. It appears last
4121   // among all unrolled iterations, due to the order of their construction.
4122   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
4123 
4124   // Find and set the insertion point after the previous value if it is an
4125   // instruction.
4126   BasicBlock::iterator InsertPt;
4127   // Note that the previous value may have been constant-folded so it is not
4128   // guaranteed to be an instruction in the vector loop.
4129   // FIXME: Loop invariant values do not form recurrences. We should deal with
4130   //        them earlier.
4131   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4132     InsertPt = LoopVectorBody->getFirstInsertionPt();
4133   else {
4134     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4135     if (isa<PHINode>(PreviousLastPart))
4136       // If the previous value is a phi node, we should insert after all the phi
4137       // nodes in the block containing the PHI to avoid breaking basic block
      // verification. Note that the basic block may be different from
4139       // LoopVectorBody, in case we predicate the loop.
4140       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4141     else
4142       InsertPt = ++PreviousInst->getIterator();
4143   }
4144   Builder.SetInsertPoint(&*InsertPt);
4145 
4146   // We will construct a vector for the recurrence by combining the values for
4147   // the current and previous iterations. This is the required shuffle mask.
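  // E.g. for VF = 4 the mask is <3, 4, 5, 6>, i.e. the last element of the
  // first source vector followed by the first three elements of the second.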
4148   assert(!VF.isScalable());
4149   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
4150   ShuffleMask[0] = VF.getKnownMinValue() - 1;
4151   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
4152     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
4153 
4154   // The vector from which to take the initial value for the current iteration
4155   // (actual or unrolled). Initially, this is the vector phi node.
4156   Value *Incoming = VecPhi;
4157 
4158   // Shuffle the current and previous vector and update the vector parts.
4159   for (unsigned Part = 0; Part < UF; ++Part) {
4160     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
4161     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
4162     auto *Shuffle =
4163         VF.isVector()
4164             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4165             : Incoming;
4166     PhiPart->replaceAllUsesWith(Shuffle);
4167     cast<Instruction>(PhiPart)->eraseFromParent();
4168     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
4169     Incoming = PreviousPart;
4170   }
4171 
4172   // Fix the latch value of the new recurrence in the vector loop.
4173   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4174 
4175   // Extract the last vector element in the middle block. This will be the
4176   // initial value for the recurrence when jumping to the scalar loop.
4177   auto *ExtractForScalar = Incoming;
4178   if (VF.isVector()) {
4179     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4180     ExtractForScalar = Builder.CreateExtractElement(
4181         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4182         "vector.recur.extract");
4183   }
4184   // Extract the second last element in the middle block if the
4185   // Phi is used outside the loop. We need to extract the phi itself
4186   // and not the last element (the phi update in the current iteration). This
4187   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4188   // when the scalar loop is not run at all.
4189   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4190   if (VF.isVector())
4191     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4192         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4193         "vector.recur.extract.for.phi");
  // When the loop is only unrolled and not vectorized, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second last element when VF > 1.
4198   else if (UF > 1)
4199     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4200 
4201   // Fix the initial value of the original recurrence in the scalar loop.
4202   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4203   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4204   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4205     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4206     Start->addIncoming(Incoming, BB);
4207   }
4208 
4209   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4210   Phi->setName("scalar.recur");
4211 
4212   // Finally, fix users of the recurrence outside the loop. The users will need
4213   // either the last value of the scalar recurrence or the last value of the
4214   // vector recurrence we extracted in the middle block. Since the loop is in
4215   // LCSSA form, we just need to find all the phi nodes for the original scalar
4216   // recurrence in the exit block, and then add an edge for the middle block.
4217   // Note that LCSSA does not imply single entry when the original scalar loop
4218   // had multiple exiting edges (as we always run the last iteration in the
4219   // scalar epilogue); in that case, the exiting path through middle will be
4220   // dynamically dead and the value picked for the phi doesn't matter.
4221   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4222     if (any_of(LCSSAPhi.incoming_values(),
4223                [Phi](Value *V) { return V == Phi; }))
4224       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4225 }
4226 
4227 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  // Get its reduction variable descriptor.
4229   assert(Legal->isReductionVariable(Phi) &&
4230          "Unable to find the reduction variable");
4231   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4232 
4233   RecurKind RK = RdxDesc.getRecurrenceKind();
4234   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4235   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4236   setDebugLocFromInst(Builder, ReductionStartValue);
4237   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4238 
4239   // This is the vector-clone of the value that leaves the loop.
4240   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4241 
4242   // Wrap flags are in general invalid after vectorization, clear them.
4243   clearReductionWrapFlags(RdxDesc);
4244 
4245   // Fix the vector-loop phi.
4246 
4247   // Reductions do not have to start at zero. They can start with
4248   // any loop invariant values.
4249   BasicBlock *Latch = OrigLoop->getLoopLatch();
4250   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4251 
4252   for (unsigned Part = 0; Part < UF; ++Part) {
4253     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4254     Value *Val = getOrCreateVectorValue(LoopVal, Part);
4255     cast<PHINode>(VecRdxPhi)
4256       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4257   }
4258 
4259   // Before each round, move the insertion point right between
4260   // the PHIs and the values we are going to write.
4261   // This allows us to write both PHINodes and the extractelement
4262   // instructions.
4263   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4264 
4265   setDebugLocFromInst(Builder, LoopExitInst);
4266 
4267   // If tail is folded by masking, the vector value to leave the loop should be
4268   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4269   // instead of the former. For an inloop reduction the reduction will already
4270   // be predicated, and does not need to be handled here.
4271   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4272     for (unsigned Part = 0; Part < UF; ++Part) {
4273       Value *VecLoopExitInst =
4274           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4275       Value *Sel = nullptr;
4276       for (User *U : VecLoopExitInst->users()) {
4277         if (isa<SelectInst>(U)) {
4278           assert(!Sel && "Reduction exit feeding two selects");
4279           Sel = U;
4280         } else
4281           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4282       }
4283       assert(Sel && "Reduction exit feeds no select");
4284       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
4285 
4286       // If the target can create a predicated operator for the reduction at no
4287       // extra cost in the loop (for example a predicated vadd), it can be
4288       // cheaper for the select to remain in the loop than be sunk out of it,
4289       // and so use the select value for the phi instead of the old
4290       // LoopExitValue.
4291       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4292       if (PreferPredicatedReductionSelect ||
4293           TTI->preferPredicatedReductionSelect(
4294               RdxDesc.getOpcode(), Phi->getType(),
4295               TargetTransformInfo::ReductionFlags())) {
4296         auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part));
4297         VecRdxPhi->setIncomingValueForBlock(
4298             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4299       }
4300     }
4301   }
4302 
4303   // If the vector reduction can be performed in a smaller type, we truncate
4304   // then extend the loop exit value to enable InstCombine to evaluate the
4305   // entire expression in the smaller type.
4306   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4307     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4308     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4309     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4310     Builder.SetInsertPoint(
4311         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4312     VectorParts RdxParts(UF);
4313     for (unsigned Part = 0; Part < UF; ++Part) {
4314       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4315       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4316       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4317                                         : Builder.CreateZExt(Trunc, VecTy);
4318       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4319            UI != RdxParts[Part]->user_end();)
4320         if (*UI != Trunc) {
4321           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4322           RdxParts[Part] = Extnd;
4323         } else {
4324           ++UI;
4325         }
4326     }
4327     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4328     for (unsigned Part = 0; Part < UF; ++Part) {
4329       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4330       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
4331     }
4332   }
4333 
4334   // Reduce all of the unrolled parts into a single vector.
4335   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
4336   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4337 
4338   // The middle block terminator has already been assigned a DebugLoc here (the
4339   // OrigLoop's single latch terminator). We want the whole middle block to
4340   // appear to execute on this line because: (a) it is all compiler generated,
4341   // (b) these instructions are always executed after evaluating the latch
4342   // conditional branch, and (c) other passes may add new predecessors which
4343   // terminate on this line. This is the easiest way to ensure we don't
4344   // accidentally cause an extra step back into the loop while debugging.
4345   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4346   {
4347     // Floating-point operations should have some FMF to enable the reduction.
4348     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4349     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4350     for (unsigned Part = 1; Part < UF; ++Part) {
4351       Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4352       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4353         ReducedPartRdx = Builder.CreateBinOp(
4354             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4355       } else {
4356         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4357       }
4358     }
4359   }
4360 
4361   // Create the reduction after the loop. Note that inloop reductions create the
4362   // target reduction in the loop using a Reduction recipe.
4363   if (VF.isVector() && !IsInLoopReductionPhi) {
4364     ReducedPartRdx =
4365         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4366     // If the reduction can be performed in a smaller type, we need to extend
4367     // the reduction to the wider type before we branch to the original loop.
4368     if (Phi->getType() != RdxDesc.getRecurrenceType())
4369       ReducedPartRdx =
4370         RdxDesc.isSigned()
4371         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4372         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4373   }
4374 
4375   // Create a phi node that merges control-flow from the backedge-taken check
4376   // block and the middle block.
4377   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4378                                         LoopScalarPreHeader->getTerminator());
4379   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4380     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4381   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4382 
4383   // Now, we need to fix the users of the reduction variable
4384   // inside and outside of the scalar remainder loop.
4385 
4386   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4387   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4389   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4390     if (any_of(LCSSAPhi.incoming_values(),
4391                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4392       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4393 
4394   // Fix the scalar loop reduction variable with the incoming reduction sum
4395   // from the vector body and from the backedge value.
4396   int IncomingEdgeBlockIdx =
4397     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4398   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4399   // Pick the other block.
4400   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4401   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4402   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4403 }
4404 
4405 void InnerLoopVectorizer::clearReductionWrapFlags(
4406     RecurrenceDescriptor &RdxDesc) {
4407   RecurKind RK = RdxDesc.getRecurrenceKind();
4408   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4409     return;
4410 
4411   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4412   assert(LoopExitInstr && "null loop exit instruction");
4413   SmallVector<Instruction *, 8> Worklist;
4414   SmallPtrSet<Instruction *, 8> Visited;
4415   Worklist.push_back(LoopExitInstr);
4416   Visited.insert(LoopExitInstr);
4417 
4418   while (!Worklist.empty()) {
4419     Instruction *Cur = Worklist.pop_back_val();
4420     if (isa<OverflowingBinaryOperator>(Cur))
4421       for (unsigned Part = 0; Part < UF; ++Part) {
4422         Value *V = getOrCreateVectorValue(Cur, Part);
4423         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4424       }
4425 
4426     for (User *U : Cur->users()) {
4427       Instruction *UI = cast<Instruction>(U);
4428       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4429           Visited.insert(UI).second)
4430         Worklist.push_back(UI);
4431     }
4432   }
4433 }
4434 
4435 void InnerLoopVectorizer::fixLCSSAPHIs() {
4436   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4437     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4438       // Some phis were already hand updated by the reduction and recurrence
4439       // code above, leave them alone.
4440       continue;
4441 
4442     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4443     // Non-instruction incoming values will have only one value.
4444     unsigned LastLane = 0;
4445     if (isa<Instruction>(IncomingValue))
4446       LastLane = Cost->isUniformAfterVectorization(
4447                      cast<Instruction>(IncomingValue), VF)
4448                      ? 0
4449                      : VF.getKnownMinValue() - 1;
4450     assert((!VF.isScalable() || LastLane == 0) &&
4451            "scalable vectors dont support non-uniform scalars yet");
4452     // Can be a loop invariant incoming value or the last scalar value to be
4453     // extracted from the vectorized loop.
4454     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4455     Value *lastIncomingValue =
4456         getOrCreateScalarValue(IncomingValue, VPIteration(UF - 1, LastLane));
4457     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4458   }
4459 }
4460 
4461 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4462   // The basic block and loop containing the predicated instruction.
4463   auto *PredBB = PredInst->getParent();
4464   auto *VectorLoop = LI->getLoopFor(PredBB);
4465 
4466   // Initialize a worklist with the operands of the predicated instruction.
4467   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4468 
4469   // Holds instructions that we need to analyze again. An instruction may be
4470   // reanalyzed if we don't yet know if we can sink it or not.
4471   SmallVector<Instruction *, 8> InstsToReanalyze;
4472 
4473   // Returns true if a given use occurs in the predicated block. Phi nodes use
4474   // their operands in their corresponding predecessor blocks.
4475   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4476     auto *I = cast<Instruction>(U.getUser());
4477     BasicBlock *BB = I->getParent();
4478     if (auto *Phi = dyn_cast<PHINode>(I))
4479       BB = Phi->getIncomingBlock(
4480           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4481     return BB == PredBB;
4482   };
4483 
4484   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist fails to sink any instruction.
4488   bool Changed;
4489   do {
4490     // Add the instructions that need to be reanalyzed to the worklist, and
4491     // reset the changed indicator.
4492     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4493     InstsToReanalyze.clear();
4494     Changed = false;
4495 
4496     while (!Worklist.empty()) {
4497       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4498 
4499       // We can't sink an instruction if it is a phi node, is already in the
4500       // predicated block, is not in the loop, or may have side effects.
4501       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4502           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4503         continue;
4504 
4505       // It's legal to sink the instruction if all its uses occur in the
4506       // predicated block. Otherwise, there's nothing to do yet, and we may
4507       // need to reanalyze the instruction.
4508       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4509         InstsToReanalyze.push_back(I);
4510         continue;
4511       }
4512 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4515       I->moveBefore(&*PredBB->getFirstInsertionPt());
4516       Worklist.insert(I->op_begin(), I->op_end());
4517 
4518       // The sinking may have enabled other instructions to be sunk, so we will
4519       // need to iterate.
4520       Changed = true;
4521     }
4522   } while (Changed);
4523 }
4524 
4525 void InnerLoopVectorizer::fixNonInductionPHIs() {
4526   for (PHINode *OrigPhi : OrigPHIsToFix) {
4527     PHINode *NewPhi =
4528         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4529     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4530 
4531     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4532         predecessors(OrigPhi->getParent()));
4533     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4534         predecessors(NewPhi->getParent()));
4535     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4536            "Scalar and Vector BB should have the same number of predecessors");
4537 
4538     // The insertion point in Builder may be invalidated by the time we get
4539     // here. Force the Builder insertion point to something valid so that we do
4540     // not run into issues during insertion point restore in
4541     // getOrCreateVectorValue calls below.
4542     Builder.SetInsertPoint(NewPhi);
4543 
4544     // The predecessor order is preserved and we can rely on mapping between
4545     // scalar and vector block predecessors.
4546     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4547       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4548 
4549       // When looking up the new scalar/vector values to fix up, use incoming
4550       // values from original phi.
4551       Value *ScIncV =
4552           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4553 
4554       // Scalar incoming value may need a broadcast
4555       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4556       NewPhi->addIncoming(NewIncV, NewPredBB);
4557     }
4558   }
4559 }
4560 
4561 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4562                                    VPUser &Operands, unsigned UF,
4563                                    ElementCount VF, bool IsPtrLoopInvariant,
4564                                    SmallBitVector &IsIndexLoopInvariant,
4565                                    VPTransformState &State) {
4566   // Construct a vector GEP by widening the operands of the scalar GEP as
4567   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4568   // results in a vector of pointers when at least one operand of the GEP
4569   // is vector-typed. Thus, to keep the representation compact, we only use
4570   // vector-typed operands for loop-varying values.
4571 
4572   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4573     // If we are vectorizing, but the GEP has only loop-invariant operands,
4574     // the GEP we build (by only using vector-typed operands for
4575     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4576     // produce a vector of pointers, we need to either arbitrarily pick an
4577     // operand to broadcast, or broadcast a clone of the original GEP.
4578     // Here, we broadcast a clone of the original.
4579     //
4580     // TODO: If at some point we decide to scalarize instructions having
4581     //       loop-invariant operands, this special case will no longer be
4582     //       required. We would add the scalarization decision to
4583     //       collectLoopScalars() and teach getVectorValue() to broadcast
4584     //       the lane-zero scalar value.
4585     auto *Clone = Builder.Insert(GEP->clone());
4586     for (unsigned Part = 0; Part < UF; ++Part) {
4587       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4588       State.set(VPDef, GEP, EntryPart, Part);
4589       addMetadata(EntryPart, GEP);
4590     }
4591   } else {
4592     // If the GEP has at least one loop-varying operand, we are sure to
4593     // produce a vector of pointers. But if we are only unrolling, we want
4594     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4595     // produce with the code below will be scalar (if VF == 1) or vector
4596     // (otherwise). Note that for the unroll-only case, we still maintain
4597     // values in the vector mapping with initVector, as we do for other
4598     // instructions.
4599     for (unsigned Part = 0; Part < UF; ++Part) {
4600       // The pointer operand of the new GEP. If it's loop-invariant, we
4601       // won't broadcast it.
4602       auto *Ptr = IsPtrLoopInvariant
4603                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4604                       : State.get(Operands.getOperand(0), Part);
4605 
4606       // Collect all the indices for the new GEP. If any index is
4607       // loop-invariant, we won't broadcast it.
4608       SmallVector<Value *, 4> Indices;
4609       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4610         VPValue *Operand = Operands.getOperand(I);
4611         if (IsIndexLoopInvariant[I - 1])
4612           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4613         else
4614           Indices.push_back(State.get(Operand, Part));
4615       }
4616 
4617       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4618       // but it should be a vector, otherwise.
4619       auto *NewGEP =
4620           GEP->isInBounds()
4621               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4622                                           Indices)
4623               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4624       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4625              "NewGEP is not a pointer vector");
4626       State.set(VPDef, GEP, NewGEP, Part);
4627       addMetadata(NewGEP, GEP);
4628     }
4629   }
4630 }
4631 
4632 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4633                                               RecurrenceDescriptor *RdxDesc,
4634                                               Value *StartV, unsigned UF,
4635                                               ElementCount VF) {
4636   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4637   PHINode *P = cast<PHINode>(PN);
4638   if (EnableVPlanNativePath) {
4639     // Currently we enter here in the VPlan-native path for non-induction
4640     // PHIs where all control flow is uniform. We simply widen these PHIs.
4641     // Create a vector phi with no operands - the vector phi operands will be
4642     // set at the end of vector code generation.
4643     Type *VecTy =
4644         (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF);
4645     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4646     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4647     OrigPHIsToFix.push_back(P);
4648 
4649     return;
4650   }
4651 
4652   assert(PN->getParent() == OrigLoop->getHeader() &&
4653          "Non-header phis should have been handled elsewhere");
4654 
4655   // In order to support recurrences we need to be able to vectorize Phi nodes.
4656   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4657   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4658   // this value when we vectorize all of the instructions that use the PHI.
4659   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4660     Value *Iden = nullptr;
4661     bool ScalarPHI =
4662         (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4663     Type *VecTy =
4664         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4665 
4666     if (RdxDesc) {
4667       assert(Legal->isReductionVariable(P) && StartV &&
4668              "RdxDesc should only be set for reduction variables; in that case "
4669              "a StartV is also required");
4670       RecurKind RK = RdxDesc->getRecurrenceKind();
4671       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4673         if (ScalarPHI) {
4674           Iden = StartV;
4675         } else {
4676           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4677           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4678           StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident");
4679         }
4680       } else {
4681         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4682             RK, VecTy->getScalarType());
4683         Iden = IdenC;
4684 
4685         if (!ScalarPHI) {
4686           Iden = ConstantVector::getSplat(VF, IdenC);
4687           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4688           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
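          // Lane 0 carries the actual start value; the remaining lanes hold
          // the identity so they do not affect the final reduction result.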
4689           Constant *Zero = Builder.getInt32(0);
4690           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4691         }
4692       }
4693     }
4694 
4695     for (unsigned Part = 0; Part < UF; ++Part) {
4696       // This is phase one of vectorizing PHIs.
4697       Value *EntryPart = PHINode::Create(
4698           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4699       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4700       if (StartV) {
4701         // Make sure to add the reduction start value only to the
4702         // first unroll part.
4703         Value *StartVal = (Part == 0) ? StartV : Iden;
4704         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4705       }
4706     }
4707     return;
4708   }
4709 
4710   assert(!Legal->isReductionVariable(P) &&
4711          "reductions should be handled above");
4712 
4713   setDebugLocFromInst(Builder, P);
4714 
4715   // This PHINode must be an induction variable.
4716   // Make sure that we know about it.
4717   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4718 
4719   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4720   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4721 
4722   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4723   // which can be found from the original scalar operations.
4724   switch (II.getKind()) {
4725   case InductionDescriptor::IK_NoInduction:
4726     llvm_unreachable("Unknown induction");
4727   case InductionDescriptor::IK_IntInduction:
4728   case InductionDescriptor::IK_FpInduction:
4729     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4730   case InductionDescriptor::IK_PtrInduction: {
4731     // Handle the pointer induction variable case.
4732     assert(P->getType()->isPointerTy() && "Unexpected type.");
4733 
4734     if (Cost->isScalarAfterVectorization(P, VF)) {
4735       // This is the normalized GEP that starts counting at zero.
4736       Value *PtrInd =
4737           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4738       // Determine the number of scalars we need to generate for each unroll
4739       // iteration. If the instruction is uniform, we only need to generate the
4740       // first lane. Otherwise, we generate all VF values.
4741       unsigned Lanes =
4742           Cost->isUniformAfterVectorization(P, VF) ? 1 : VF.getKnownMinValue();
4743       for (unsigned Part = 0; Part < UF; ++Part) {
4744         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4745           Constant *Idx = ConstantInt::get(PtrInd->getType(),
4746                                            Lane + Part * VF.getKnownMinValue());
4747           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4748           Value *SclrGep =
4749               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4750           SclrGep->setName("next.gep");
4751           VectorLoopValueMap.setScalarValue(P, VPIteration(Part, Lane),
4752                                             SclrGep);
4753         }
4754       }
4755       return;
4756     }
4757     assert(isa<SCEVConstant>(II.getStep()) &&
4758            "Induction step not a SCEV constant!");
4759     Type *PhiType = II.getStep()->getType();
4760 
4761     // Build a pointer phi
4762     Value *ScalarStartValue = II.getStartValue();
4763     Type *ScStValueType = ScalarStartValue->getType();
4764     PHINode *NewPointerPhi =
4765         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4766     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4767 
4768     // A pointer induction, performed by using a gep
4769     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4770     Instruction *InductionLoc = LoopLatch->getTerminator();
4771     const SCEV *ScalarStep = II.getStep();
4772     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4773     Value *ScalarStepValue =
4774         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4775     Value *InductionGEP = GetElementPtrInst::Create(
4776         ScStValueType->getPointerElementType(), NewPointerPhi,
4777         Builder.CreateMul(
4778             ScalarStepValue,
4779             ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)),
4780         "ptr.ind", InductionLoc);
4781     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4782 
4783     // Create UF many actual address geps that use the pointer
4784     // phi as base and a vectorized version of the step value
4785     // (<step*0, ..., step*N>) as offset.
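    //
    // For example (illustrative only), with VF=4, UF=1 and a unit step, part 0
    // is addressed roughly as:
    //   %vector.gep = getelementptr T, T* %pointer.phi,
    //                               <4 x i64> <i64 0, i64 1, i64 2, i64 3>
    // while %ptr.ind advances %pointer.phi by VF * UF * step each iteration.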
4786     for (unsigned Part = 0; Part < UF; ++Part) {
4787       SmallVector<Constant *, 8> Indices;
4788       // Create a vector of consecutive numbers from zero to VF.
4789       for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
4790         Indices.push_back(
4791             ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue()));
4792       Constant *StartOffset = ConstantVector::get(Indices);
4793 
4794       Value *GEP = Builder.CreateGEP(
4795           ScStValueType->getPointerElementType(), NewPointerPhi,
4796           Builder.CreateMul(
4797               StartOffset,
4798               Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue),
4799               "vector.gep"));
4800       VectorLoopValueMap.setVectorValue(P, Part, GEP);
4801     }
4802   }
4803   }
4804 }
4805 
4806 /// A helper function for checking whether an integer division-related
4807 /// instruction may divide by zero (in which case it must be predicated if
4808 /// executed conditionally in the scalar code).
4809 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4811 /// converted into multiplication, so we will still end up scalarizing
4812 /// the division, but can do so w/o predication.
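///
/// For example (illustrative only), in scalar code such as
///   if (c) q = a / b;
/// a non-constant divisor 'b' is not known to be non-zero, so when the block
/// is vectorized the division must be predicated to avoid executing it on
/// iterations where 'c' is false.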
4813 static bool mayDivideByZero(Instruction &I) {
4814   assert((I.getOpcode() == Instruction::UDiv ||
4815           I.getOpcode() == Instruction::SDiv ||
4816           I.getOpcode() == Instruction::URem ||
4817           I.getOpcode() == Instruction::SRem) &&
4818          "Unexpected instruction");
4819   Value *Divisor = I.getOperand(1);
4820   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4821   return !CInt || CInt->isZero();
4822 }
4823 
4824 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4825                                            VPUser &User,
4826                                            VPTransformState &State) {
4827   switch (I.getOpcode()) {
4828   case Instruction::Call:
4829   case Instruction::Br:
4830   case Instruction::PHI:
4831   case Instruction::GetElementPtr:
4832   case Instruction::Select:
4833     llvm_unreachable("This instruction is handled by a different recipe.");
4834   case Instruction::UDiv:
4835   case Instruction::SDiv:
4836   case Instruction::SRem:
4837   case Instruction::URem:
4838   case Instruction::Add:
4839   case Instruction::FAdd:
4840   case Instruction::Sub:
4841   case Instruction::FSub:
4842   case Instruction::FNeg:
4843   case Instruction::Mul:
4844   case Instruction::FMul:
4845   case Instruction::FDiv:
4846   case Instruction::FRem:
4847   case Instruction::Shl:
4848   case Instruction::LShr:
4849   case Instruction::AShr:
4850   case Instruction::And:
4851   case Instruction::Or:
4852   case Instruction::Xor: {
4853     // Just widen unops and binops.
4854     setDebugLocFromInst(Builder, &I);
4855 
4856     for (unsigned Part = 0; Part < UF; ++Part) {
4857       SmallVector<Value *, 2> Ops;
4858       for (VPValue *VPOp : User.operands())
4859         Ops.push_back(State.get(VPOp, Part));
4860 
4861       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4862 
4863       if (auto *VecOp = dyn_cast<Instruction>(V))
4864         VecOp->copyIRFlags(&I);
4865 
4866       // Use this vector value for all users of the original instruction.
4867       State.set(Def, &I, V, Part);
4868       addMetadata(V, &I);
4869     }
4870 
4871     break;
4872   }
4873   case Instruction::ICmp:
4874   case Instruction::FCmp: {
4875     // Widen compares. Generate vector compares.
4876     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4877     auto *Cmp = cast<CmpInst>(&I);
4878     setDebugLocFromInst(Builder, Cmp);
4879     for (unsigned Part = 0; Part < UF; ++Part) {
4880       Value *A = State.get(User.getOperand(0), Part);
4881       Value *B = State.get(User.getOperand(1), Part);
4882       Value *C = nullptr;
4883       if (FCmp) {
4884         // Propagate fast math flags.
4885         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4886         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4887         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4888       } else {
4889         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4890       }
4891       State.set(Def, &I, C, Part);
4892       addMetadata(C, &I);
4893     }
4894 
4895     break;
4896   }
4897 
4898   case Instruction::ZExt:
4899   case Instruction::SExt:
4900   case Instruction::FPToUI:
4901   case Instruction::FPToSI:
4902   case Instruction::FPExt:
4903   case Instruction::PtrToInt:
4904   case Instruction::IntToPtr:
4905   case Instruction::SIToFP:
4906   case Instruction::UIToFP:
4907   case Instruction::Trunc:
4908   case Instruction::FPTrunc:
4909   case Instruction::BitCast: {
4910     auto *CI = cast<CastInst>(&I);
4911     setDebugLocFromInst(Builder, CI);
4912 
    // Vectorize casts.
4914     Type *DestTy =
4915         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4916 
4917     for (unsigned Part = 0; Part < UF; ++Part) {
4918       Value *A = State.get(User.getOperand(0), Part);
4919       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4920       State.set(Def, &I, Cast, Part);
4921       addMetadata(Cast, &I);
4922     }
4923     break;
4924   }
4925   default:
4926     // This instruction is not vectorized by simple widening.
4927     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4928     llvm_unreachable("Unhandled instruction!");
4929   } // end of switch.
4930 }
4931 
4932 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4933                                                VPUser &ArgOperands,
4934                                                VPTransformState &State) {
4935   assert(!isa<DbgInfoIntrinsic>(I) &&
4936          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4937   setDebugLocFromInst(Builder, &I);
4938 
4939   Module *M = I.getParent()->getParent()->getParent();
4940   auto *CI = cast<CallInst>(&I);
4941 
4942   SmallVector<Type *, 4> Tys;
4943   for (Value *ArgOperand : CI->arg_operands())
4944     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4945 
4946   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4947 
  // The flag shows whether we use an intrinsic or a usual call for the
  // vectorized version of the instruction, i.e. whether it is more beneficial
  // to perform the intrinsic call than the library call.
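  // For example (illustrative only), a scalar call recognized as llvm.fabs may
  // be widened either to llvm.fabs.v4f32 or to a vector math-library routine
  // known to TLI, whichever the cost model reports as cheaper.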
4951   bool NeedToScalarize = false;
4952   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4953   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4954   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4955   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4956          "Instruction should be scalarized elsewhere.");
4957   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4958          "Cannot have invalid costs while widening");
4959 
4960   for (unsigned Part = 0; Part < UF; ++Part) {
4961     SmallVector<Value *, 4> Args;
4962     for (auto &I : enumerate(ArgOperands.operands())) {
4963       // Some intrinsics have a scalar argument - don't replace it with a
4964       // vector.
4965       Value *Arg;
4966       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4967         Arg = State.get(I.value(), Part);
4968       else
4969         Arg = State.get(I.value(), VPIteration(0, 0));
4970       Args.push_back(Arg);
4971     }
4972 
4973     Function *VectorF;
4974     if (UseVectorIntrinsic) {
4975       // Use vector version of the intrinsic.
4976       Type *TysForDecl[] = {CI->getType()};
4977       if (VF.isVector()) {
4978         assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4979         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4980       }
4981       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4982       assert(VectorF && "Can't retrieve vector intrinsic.");
4983     } else {
4984       // Use vector version of the function call.
4985       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4986 #ifndef NDEBUG
4987       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4988              "Can't create vector function.");
4989 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, &I, V, Part);
    addMetadata(V, &I);
5001   }
5002 }
5003 
5004 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5005                                                  VPUser &Operands,
5006                                                  bool InvariantCond,
5007                                                  VPTransformState &State) {
5008   setDebugLocFromInst(Builder, &I);
5009 
  // The condition can be loop invariant but still defined inside the
5011   // loop. This means that we can't just use the original 'cond' value.
5012   // We have to take the 'vectorized' value and pick the first lane.
5013   // Instcombine will make this a no-op.
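  //
  // For example (illustrative only), a vectorized but invariant condition is
  // reduced back to a single boolean roughly as:
  //   %cond.lane0 = extractelement <4 x i1> %vec.cond, i32 0
  //   %sel        = select i1 %cond.lane0, <4 x i32> %op0, <4 x i32> %op1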
5014   auto *InvarCond = InvariantCond
5015                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5016                         : nullptr;
5017 
5018   for (unsigned Part = 0; Part < UF; ++Part) {
5019     Value *Cond =
5020         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5021     Value *Op0 = State.get(Operands.getOperand(1), Part);
5022     Value *Op1 = State.get(Operands.getOperand(2), Part);
5023     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5024     State.set(VPDef, &I, Sel, Part);
5025     addMetadata(Sel, &I);
5026   }
5027 }
5028 
5029 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5030   // We should not collect Scalars more than once per VF. Right now, this
5031   // function is called from collectUniformsAndScalars(), which already does
5032   // this check. Collecting Scalars for VF=1 does not make any sense.
5033   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5034          "This function should not be visited twice for the same VF");
5035 
5036   SmallSetVector<Instruction *, 8> Worklist;
5037 
5038   // These sets are used to seed the analysis with pointers used by memory
5039   // accesses that will remain scalar.
5040   SmallSetVector<Instruction *, 8> ScalarPtrs;
5041   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5042   auto *Latch = TheLoop->getLoopLatch();
5043 
5044   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5045   // The pointer operands of loads and stores will be scalar as long as the
5046   // memory access is not a gather or scatter operation. The value operand of a
5047   // store will remain scalar if the store is scalarized.
5048   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5049     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5050     assert(WideningDecision != CM_Unknown &&
5051            "Widening decision should be ready at this moment");
5052     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5053       if (Ptr == Store->getValueOperand())
5054         return WideningDecision == CM_Scalarize;
5055     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5056            "Ptr is neither a value or pointer operand");
5057     return WideningDecision != CM_GatherScatter;
5058   };
5059 
5060   // A helper that returns true if the given value is a bitcast or
5061   // getelementptr instruction contained in the loop.
5062   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5063     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5064             isa<GetElementPtrInst>(V)) &&
5065            !TheLoop->isLoopInvariant(V);
5066   };
5067 
5068   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5069     if (!isa<PHINode>(Ptr) ||
5070         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5071       return false;
5072     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5073     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5074       return false;
5075     return isScalarUse(MemAccess, Ptr);
5076   };
5077 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually the pointer induction of a loop, it is inserted
  // into Worklist. If the use will be a scalar use, and the pointer is
  // only used by memory accesses, we place the pointer in ScalarPtrs.
  // Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5083   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5084     if (isScalarPtrInduction(MemAccess, Ptr)) {
5085       Worklist.insert(cast<Instruction>(Ptr));
5086       Instruction *Update = cast<Instruction>(
5087           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5088       Worklist.insert(Update);
5089       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5090                         << "\n");
5091       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5092                         << "\n");
5093       return;
5094     }
5095     // We only care about bitcast and getelementptr instructions contained in
5096     // the loop.
5097     if (!isLoopVaryingBitCastOrGEP(Ptr))
5098       return;
5099 
5100     // If the pointer has already been identified as scalar (e.g., if it was
5101     // also identified as uniform), there's nothing to do.
5102     auto *I = cast<Instruction>(Ptr);
5103     if (Worklist.count(I))
5104       return;
5105 
5106     // If the use of the pointer will be a scalar use, and all users of the
5107     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5108     // place the pointer in PossibleNonScalarPtrs.
5109     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5110           return isa<LoadInst>(U) || isa<StoreInst>(U);
5111         }))
5112       ScalarPtrs.insert(I);
5113     else
5114       PossibleNonScalarPtrs.insert(I);
5115   };
5116 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
5121   //
5122   // (1) Add to the worklist all instructions that have been identified as
5123   // uniform-after-vectorization.
5124   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5125 
5126   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5127   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5129   // scatter operation. The value operand of a store will remain scalar if the
5130   // store is scalarized.
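  //
  // For example (illustrative only), in
  //   %gep = getelementptr i32, i32* %base, i64 %iv
  //   store i32 %val, i32* %gep
  // the GEP is seeded as a scalar pointer provided the store is not widened
  // into a scatter, since the address computation can then remain scalar.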
5131   for (auto *BB : TheLoop->blocks())
5132     for (auto &I : *BB) {
5133       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5134         evaluatePtrUse(Load, Load->getPointerOperand());
5135       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5136         evaluatePtrUse(Store, Store->getPointerOperand());
5137         evaluatePtrUse(Store, Store->getValueOperand());
5138       }
5139     }
5140   for (auto *I : ScalarPtrs)
5141     if (!PossibleNonScalarPtrs.count(I)) {
5142       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5143       Worklist.insert(I);
5144     }
5145 
5146   // Insert the forced scalars.
5147   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5148   // induction variable when the PHI user is scalarized.
5149   auto ForcedScalar = ForcedScalars.find(VF);
5150   if (ForcedScalar != ForcedScalars.end())
5151     for (auto *I : ForcedScalar->second)
5152       Worklist.insert(I);
5153 
5154   // Expand the worklist by looking through any bitcasts and getelementptr
5155   // instructions we've already identified as scalar. This is similar to the
5156   // expansion step in collectLoopUniforms(); however, here we're only
5157   // expanding to include additional bitcasts and getelementptr instructions.
5158   unsigned Idx = 0;
5159   while (Idx != Worklist.size()) {
5160     Instruction *Dst = Worklist[Idx++];
5161     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5162       continue;
5163     auto *Src = cast<Instruction>(Dst->getOperand(0));
5164     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5165           auto *J = cast<Instruction>(U);
5166           return !TheLoop->contains(J) || Worklist.count(J) ||
5167                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5168                   isScalarUse(J, Src));
5169         })) {
5170       Worklist.insert(Src);
5171       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5172     }
5173   }
5174 
5175   // An induction variable will remain scalar if all users of the induction
5176   // variable and induction variable update remain scalar.
5177   for (auto &Induction : Legal->getInductionVars()) {
5178     auto *Ind = Induction.first;
5179     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5180 
5181     // If tail-folding is applied, the primary induction variable will be used
5182     // to feed a vector compare.
5183     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5184       continue;
5185 
5186     // Determine if all users of the induction variable are scalar after
5187     // vectorization.
5188     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5189       auto *I = cast<Instruction>(U);
5190       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5191     });
5192     if (!ScalarInd)
5193       continue;
5194 
5195     // Determine if all users of the induction variable update instruction are
5196     // scalar after vectorization.
5197     auto ScalarIndUpdate =
5198         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5199           auto *I = cast<Instruction>(U);
5200           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5201         });
5202     if (!ScalarIndUpdate)
5203       continue;
5204 
5205     // The induction variable and its update instruction will remain scalar.
5206     Worklist.insert(Ind);
5207     Worklist.insert(IndUpdate);
5208     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5209     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5210                       << "\n");
5211   }
5212 
5213   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5214 }
5215 
5216 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5217                                                          ElementCount VF) {
5218   if (!blockNeedsPredication(I->getParent()))
5219     return false;
5220   switch(I->getOpcode()) {
5221   default:
5222     break;
5223   case Instruction::Load:
5224   case Instruction::Store: {
5225     if (!Legal->isMaskRequired(I))
5226       return false;
5227     auto *Ptr = getLoadStorePointerOperand(I);
5228     auto *Ty = getMemInstValueType(I);
5229     // We have already decided how to vectorize this instruction, get that
5230     // result.
5231     if (VF.isVector()) {
5232       InstWidening WideningDecision = getWideningDecision(I, VF);
5233       assert(WideningDecision != CM_Unknown &&
5234              "Widening decision should be ready at this moment");
5235       return WideningDecision == CM_Scalarize;
5236     }
5237     const Align Alignment = getLoadStoreAlignment(I);
5238     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5239                                 isLegalMaskedGather(Ty, Alignment))
5240                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5241                                 isLegalMaskedScatter(Ty, Alignment));
5242   }
5243   case Instruction::UDiv:
5244   case Instruction::SDiv:
5245   case Instruction::SRem:
5246   case Instruction::URem:
5247     return mayDivideByZero(*I);
5248   }
5249   return false;
5250 }
5251 
5252 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5253     Instruction *I, ElementCount VF) {
5254   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5255   assert(getWideningDecision(I, VF) == CM_Unknown &&
5256          "Decision should not be set yet.");
5257   auto *Group = getInterleavedAccessGroup(I);
5258   assert(Group && "Must have a group.");
5259 
  // If the instruction's allocated size doesn't equal its type size, it
5261   // requires padding and will be scalarized.
5262   auto &DL = I->getModule()->getDataLayout();
5263   auto *ScalarTy = getMemInstValueType(I);
5264   if (hasIrregularType(ScalarTy, DL, VF))
5265     return false;
5266 
5267   // Check if masking is required.
5268   // A Group may need masking for one of two reasons: it resides in a block that
5269   // needs predication, or it was decided to use masking to deal with gaps.
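  //
  // For example (illustrative only), a group loading members {A[3*i], A[3*i+1]}
  // but not A[3*i+2] has a gap; without a scalar epilogue, the last vector
  // iteration must be masked so the widened load does not run past the end
  // of A.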
5270   bool PredicatedAccessRequiresMasking =
5271       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5272   bool AccessWithGapsRequiresMasking =
5273       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5274   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5275     return true;
5276 
5277   // If masked interleaving is required, we expect that the user/target had
5278   // enabled it, because otherwise it either wouldn't have been created or
5279   // it should have been invalidated by the CostModel.
5280   assert(useMaskedInterleavedAccesses(TTI) &&
5281          "Masked interleave-groups for predicated accesses are not enabled.");
5282 
5283   auto *Ty = getMemInstValueType(I);
5284   const Align Alignment = getLoadStoreAlignment(I);
5285   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5286                           : TTI.isLegalMaskedStore(Ty, Alignment);
5287 }
5288 
5289 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5290     Instruction *I, ElementCount VF) {
5291   // Get and ensure we have a valid memory instruction.
5292   LoadInst *LI = dyn_cast<LoadInst>(I);
5293   StoreInst *SI = dyn_cast<StoreInst>(I);
5294   assert((LI || SI) && "Invalid memory instruction");
5295 
5296   auto *Ptr = getLoadStorePointerOperand(I);
5297 
5298   // In order to be widened, the pointer should be consecutive, first of all.
5299   if (!Legal->isConsecutivePtr(Ptr))
5300     return false;
5301 
5302   // If the instruction is a store located in a predicated block, it will be
5303   // scalarized.
5304   if (isScalarWithPredication(I))
5305     return false;
5306 
  // If the instruction's allocated size doesn't equal its type size, it
5308   // requires padding and will be scalarized.
5309   auto &DL = I->getModule()->getDataLayout();
5310   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5311   if (hasIrregularType(ScalarTy, DL, VF))
5312     return false;
5313 
5314   return true;
5315 }
5316 
5317 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5318   // We should not collect Uniforms more than once per VF. Right now,
5319   // this function is called from collectUniformsAndScalars(), which
5320   // already does this check. Collecting Uniforms for VF=1 does not make any
5321   // sense.
5322 
5323   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5324          "This function should not be visited twice for the same VF");
5325 
  // Initialize an entry for this VF in Uniforms. Even if we do not find any
  // uniform value, we will not analyze this VF again: Uniforms.count(VF) will
  // return 1.
5328   Uniforms[VF].clear();
5329 
5330   // We now know that the loop is vectorizable!
5331   // Collect instructions inside the loop that will remain uniform after
5332   // vectorization.
5333 
5334   // Global values, params and instructions outside of current loop are out of
5335   // scope.
5336   auto isOutOfScope = [&](Value *V) -> bool {
5337     Instruction *I = dyn_cast<Instruction>(V);
5338     return (!I || !TheLoop->contains(I));
5339   };
5340 
5341   SetVector<Instruction *> Worklist;
5342   BasicBlock *Latch = TheLoop->getLoopLatch();
5343 
5344   // Instructions that are scalar with predication must not be considered
5345   // uniform after vectorization, because that would create an erroneous
5346   // replicating region where only a single instance out of VF should be formed.
5347   // TODO: optimize such seldom cases if found important, see PR40816.
5348   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5349     if (isOutOfScope(I)) {
5350       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5351                         << *I << "\n");
5352       return;
5353     }
5354     if (isScalarWithPredication(I, VF)) {
5355       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5356                         << *I << "\n");
5357       return;
5358     }
5359     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5360     Worklist.insert(I);
5361   };
5362 
5363   // Start with the conditional branch. If the branch condition is an
5364   // instruction contained in the loop that is only used by the branch, it is
5365   // uniform.
5366   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5367   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5368     addToWorklistIfAllowed(Cmp);
5369 
5370   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5371     InstWidening WideningDecision = getWideningDecision(I, VF);
5372     assert(WideningDecision != CM_Unknown &&
5373            "Widening decision should be ready at this moment");
5374 
5375     // A uniform memory op is itself uniform.  We exclude uniform stores
5376     // here as they demand the last lane, not the first one.
5377     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5378       assert(WideningDecision == CM_Scalarize);
5379       return true;
5380     }
5381 
5382     return (WideningDecision == CM_Widen ||
5383             WideningDecision == CM_Widen_Reverse ||
5384             WideningDecision == CM_Interleave);
5385   };
5386 
5387 
5388   // Returns true if Ptr is the pointer operand of a memory access instruction
5389   // I, and I is known to not require scalarization.
5390   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5391     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5392   };
5393 
5394   // Holds a list of values which are known to have at least one uniform use.
5395   // Note that there may be other uses which aren't uniform.  A "uniform use"
5396   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e. this is not
  // the usual meaning of uniform).
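  //
  // For example (illustrative only), the address operand of a load from a
  // loop-invariant location,
  //   %v = load i32, i32* %p
  // only demands lane 0 of %p's unrolled values, even if other users of %p
  // demand all lanes.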
5399   SmallPtrSet<Value *, 8> HasUniformUse;
5400 
5401   // Scan the loop for instructions which are either a) known to have only
5402   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5403   for (auto *BB : TheLoop->blocks())
5404     for (auto &I : *BB) {
5405       // If there's no pointer operand, there's nothing to do.
5406       auto *Ptr = getLoadStorePointerOperand(&I);
5407       if (!Ptr)
5408         continue;
5409 
5410       // A uniform memory op is itself uniform.  We exclude uniform stores
5411       // here as they demand the last lane, not the first one.
5412       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5413         addToWorklistIfAllowed(&I);
5414 
5415       if (isUniformDecision(&I, VF)) {
5416         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5417         HasUniformUse.insert(Ptr);
5418       }
5419     }
5420 
5421   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5422   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5423   // disallows uses outside the loop as well.
5424   for (auto *V : HasUniformUse) {
5425     if (isOutOfScope(V))
5426       continue;
5427     auto *I = cast<Instruction>(V);
5428     auto UsersAreMemAccesses =
5429       llvm::all_of(I->users(), [&](User *U) -> bool {
5430         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5431       });
5432     if (UsersAreMemAccesses)
5433       addToWorklistIfAllowed(I);
5434   }
5435 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5439   unsigned idx = 0;
5440   while (idx != Worklist.size()) {
5441     Instruction *I = Worklist[idx++];
5442 
5443     for (auto OV : I->operand_values()) {
5444       // isOutOfScope operands cannot be uniform instructions.
5445       if (isOutOfScope(OV))
5446         continue;
5447       // First order recurrence Phi's should typically be considered
5448       // non-uniform.
5449       auto *OP = dyn_cast<PHINode>(OV);
5450       if (OP && Legal->isFirstOrderRecurrence(OP))
5451         continue;
5452       // If all the users of the operand are uniform, then add the
5453       // operand into the uniform worklist.
5454       auto *OI = cast<Instruction>(OV);
5455       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5456             auto *J = cast<Instruction>(U);
5457             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5458           }))
5459         addToWorklistIfAllowed(OI);
5460     }
5461   }
5462 
5463   // For an instruction to be added into Worklist above, all its users inside
5464   // the loop should also be in Worklist. However, this condition cannot be
5465   // true for phi nodes that form a cyclic dependence. We must process phi
5466   // nodes separately. An induction variable will remain uniform if all users
5467   // of the induction variable and induction variable update remain uniform.
5468   // The code below handles both pointer and non-pointer induction variables.
5469   for (auto &Induction : Legal->getInductionVars()) {
5470     auto *Ind = Induction.first;
5471     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5472 
5473     // Determine if all users of the induction variable are uniform after
5474     // vectorization.
5475     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5476       auto *I = cast<Instruction>(U);
5477       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5478              isVectorizedMemAccessUse(I, Ind);
5479     });
5480     if (!UniformInd)
5481       continue;
5482 
5483     // Determine if all users of the induction variable update instruction are
5484     // uniform after vectorization.
5485     auto UniformIndUpdate =
5486         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5487           auto *I = cast<Instruction>(U);
5488           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5489                  isVectorizedMemAccessUse(I, IndUpdate);
5490         });
5491     if (!UniformIndUpdate)
5492       continue;
5493 
5494     // The induction variable and its update instruction will remain uniform.
5495     addToWorklistIfAllowed(Ind);
5496     addToWorklistIfAllowed(IndUpdate);
5497   }
5498 
5499   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5500 }
5501 
5502 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5503   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5504 
5505   if (Legal->getRuntimePointerChecking()->Need) {
5506     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5507         "runtime pointer checks needed. Enable vectorization of this "
5508         "loop with '#pragma clang loop vectorize(enable)' when "
5509         "compiling with -Os/-Oz",
5510         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5511     return true;
5512   }
5513 
5514   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5515     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5516         "runtime SCEV checks needed. Enable vectorization of this "
5517         "loop with '#pragma clang loop vectorize(enable)' when "
5518         "compiling with -Os/-Oz",
5519         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5520     return true;
5521   }
5522 
5523   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5524   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5525     reportVectorizationFailure("Runtime stride check for small trip count",
5526         "runtime stride == 1 checks needed. Enable vectorization of "
5527         "this loop without such check by compiling with -Os/-Oz",
5528         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5529     return true;
5530   }
5531 
5532   return false;
5533 }
5534 
5535 Optional<ElementCount>
5536 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5537   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
5540     reportVectorizationFailure(
5541         "Not inserting runtime ptr check for divergent target",
5542         "runtime pointer checks needed. Not enabled for divergent target",
5543         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5544     return None;
5545   }
5546 
5547   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5548   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5549   if (TC == 1) {
5550     reportVectorizationFailure("Single iteration (non) loop",
5551         "loop trip count is one, irrelevant for vectorization",
5552         "SingleIterationLoop", ORE, TheLoop);
5553     return None;
5554   }
5555 
5556   switch (ScalarEpilogueStatus) {
5557   case CM_ScalarEpilogueAllowed:
5558     return computeFeasibleMaxVF(TC, UserVF);
5559   case CM_ScalarEpilogueNotAllowedUsePredicate:
5560     LLVM_FALLTHROUGH;
5561   case CM_ScalarEpilogueNotNeededUsePredicate:
5562     LLVM_DEBUG(
5563         dbgs() << "LV: vector predicate hint/switch found.\n"
5564                << "LV: Not allowing scalar epilogue, creating predicated "
5565                << "vector loop.\n");
5566     break;
5567   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5568     // fallthrough as a special case of OptForSize
5569   case CM_ScalarEpilogueNotAllowedOptSize:
5570     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5571       LLVM_DEBUG(
5572           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5573     else
5574       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5575                         << "count.\n");
5576 
5577     // Bail if runtime checks are required, which are not good when optimising
5578     // for size.
5579     if (runtimeChecksRequired())
5580       return None;
5581 
5582     break;
5583   }
5584 
5585   // The only loops we can vectorize without a scalar epilogue, are loops with
5586   // a bottom-test and a single exiting block. We'd have to handle the fact
5587   // that not every instruction executes on the last iteration.  This will
5588   // require a lane mask which varies through the vector loop body.  (TODO)
5589   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5590     // If there was a tail-folding hint/switch, but we can't fold the tail by
5591     // masking, fallback to a vectorization with a scalar epilogue.
5592     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5593       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5594                            "scalar epilogue instead.\n");
5595       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5596       return computeFeasibleMaxVF(TC, UserVF);
5597     }
5598     return None;
5599   }
5600 
  // Now try to fold the tail by masking.
5602 
5603   // Invalidate interleave groups that require an epilogue if we can't mask
5604   // the interleave-group.
5605   if (!useMaskedInterleavedAccesses(TTI)) {
5606     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5607            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5610     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5611   }
5612 
5613   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5614   assert(!MaxVF.isScalable() &&
5615          "Scalable vectors do not yet support tail folding");
5616   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5617          "MaxVF must be a power of 2");
5618   unsigned MaxVFtimesIC =
5619       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
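  // For example (illustrative only), a known trip count of 1024 with
  // MaxVF * UserIC == 8 leaves no remainder and needs no tail, whereas a trip
  // count of 100 leaves a remainder of 4, so we must either fold the tail by
  // masking or keep a scalar epilogue.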
5622   ScalarEvolution *SE = PSE.getSE();
5623   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5624   const SCEV *ExitCount = SE->getAddExpr(
5625       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5626   const SCEV *Rem = SE->getURemExpr(
5627       SE->applyLoopGuards(ExitCount, TheLoop),
5628       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5629   if (Rem->isZero()) {
5630     // Accept MaxVF if we do not have a tail.
5631     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5632     return MaxVF;
5633   }
5634 
5635   // If we don't know the precise trip count, or if the trip count that we
5636   // found modulo the vectorization factor is not zero, try to fold the tail
5637   // by masking.
5638   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5639   if (Legal->prepareToFoldTailByMasking()) {
5640     FoldTailByMasking = true;
5641     return MaxVF;
5642   }
5643 
5644   // If there was a tail-folding hint/switch, but we can't fold the tail by
5645   // masking, fallback to a vectorization with a scalar epilogue.
5646   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5647     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5648                          "scalar epilogue instead.\n");
5649     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5650     return MaxVF;
5651   }
5652 
5653   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5654     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5655     return None;
5656   }
5657 
5658   if (TC == 0) {
5659     reportVectorizationFailure(
5660         "Unable to calculate the loop count due to complex control flow",
5661         "unable to calculate the loop count due to complex control flow",
5662         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5663     return None;
5664   }
5665 
5666   reportVectorizationFailure(
5667       "Cannot optimize for size and vectorize at the same time.",
5668       "cannot optimize for size and vectorize at the same time. "
5669       "Enable vectorization of this loop with '#pragma clang loop "
5670       "vectorize(enable)' when compiling with -Os/-Oz",
5671       "NoTailLoopWithOptForSize", ORE, TheLoop);
5672   return None;
5673 }
5674 
5675 ElementCount
5676 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5677                                                  ElementCount UserVF) {
5678   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5679                               !TTI.supportsScalableVectors() &&
5680                               !ForceTargetSupportsScalableVectors;
5681   if (IgnoreScalableUserVF) {
5682     LLVM_DEBUG(
5683         dbgs() << "LV: Ignoring VF=" << UserVF
5684                << " because target does not support scalable vectors.\n");
5685     ORE->emit([&]() {
5686       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5687                                         TheLoop->getStartLoc(),
5688                                         TheLoop->getHeader())
5689              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5690              << " because target does not support scalable vectors.";
5691     });
5692   }
5693 
5694   // Beyond this point two scenarios are handled. If UserVF isn't specified
5695   // then a suitable VF is chosen. If UserVF is specified and there are
5696   // dependencies, check if it's legal. However, if a UserVF is specified and
5697   // there are no dependencies, then there's nothing to do.
5698   if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
5699       Legal->isSafeForAnyVectorWidth())
5700     return UserVF;
5701 
5702   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5703   unsigned SmallestType, WidestType;
5704   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5705   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5706 
5707   // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from the
  // memory access that is most restrictive (involved in the smallest
  // dependence distance).
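  //
  // For example (illustrative only), a maximum safe dependence distance of 8
  // i32 elements yields 8 * 32 = 256 safe bits, i.e. at most a <8 x i32>
  // vectorization factor.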
5711   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5712 
5713   // If the user vectorization factor is legally unsafe, clamp it to a safe
5714   // value. Otherwise, return as is.
5715   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5716     unsigned MaxSafeElements =
5717         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5718     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5719 
5720     if (UserVF.isScalable()) {
5721       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5722 
5723       // Scale VF by vscale before checking if it's safe.
5724       MaxSafeVF = ElementCount::getScalable(
5725           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5726 
5727       if (MaxSafeVF.isZero()) {
5728         // The dependence distance is too small to use scalable vectors,
5729         // fallback on fixed.
5730         LLVM_DEBUG(
5731             dbgs()
5732             << "LV: Max legal vector width too small, scalable vectorization "
5733                "unfeasible. Using fixed-width vectorization instead.\n");
5734         ORE->emit([&]() {
5735           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5736                                             TheLoop->getStartLoc(),
5737                                             TheLoop->getHeader())
5738                  << "Max legal vector width too small, scalable vectorization "
5739                  << "unfeasible. Using fixed-width vectorization instead.";
5740         });
5741         return computeFeasibleMaxVF(
5742             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5743       }
5744     }
5745 
5746     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5747 
5748     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5749       return UserVF;
5750 
5751     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5752                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5753                       << ".\n");
5754     ORE->emit([&]() {
5755       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5756                                         TheLoop->getStartLoc(),
5757                                         TheLoop->getHeader())
5758              << "User-specified vectorization factor "
5759              << ore::NV("UserVectorizationFactor", UserVF)
5760              << " is unsafe, clamping to maximum safe vectorization factor "
5761              << ore::NV("VectorizationFactor", MaxSafeVF);
5762     });
5763     return MaxSafeVF;
5764   }
5765 
5766   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5767 
5768   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
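  //
  // For example (illustrative only), 256-bit wide registers with a widest
  // element type of i32 give PowerOf2Floor(256 / 32) = 8 lanes.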
5770   unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
5771 
5772   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5773                     << " / " << WidestType << " bits.\n");
5774   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5775                     << WidestRegister << " bits.\n");
5776 
5777   assert(MaxVectorSize <= WidestRegister &&
5778          "Did not expect to pack so many elements"
5779          " into one vector!");
5780   if (MaxVectorSize == 0) {
5781     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5782     MaxVectorSize = 1;
5783     return ElementCount::getFixed(MaxVectorSize);
5784   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5785              isPowerOf2_32(ConstTripCount)) {
5786     // We need to clamp the VF to be the ConstTripCount. There is no point in
5787     // choosing a higher viable VF as done in the loop below.
5788     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5789                       << ConstTripCount << "\n");
5790     MaxVectorSize = ConstTripCount;
5791     return ElementCount::getFixed(MaxVectorSize);
5792   }
5793 
5794   unsigned MaxVF = MaxVectorSize;
5795   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5796       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5797     // Collect all viable vectorization factors larger than the default MaxVF
5798     // (i.e. MaxVectorSize).
5799     SmallVector<ElementCount, 8> VFs;
5800     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5801     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5802       VFs.push_back(ElementCount::getFixed(VS));
5803 
5804     // For each VF calculate its register usage.
5805     auto RUs = calculateRegisterUsage(VFs);
5806 
5807     // Select the largest VF which doesn't require more registers than existing
5808     // ones.
5809     for (int i = RUs.size() - 1; i >= 0; --i) {
5810       bool Selected = true;
5811       for (auto& pair : RUs[i].MaxLocalUsers) {
5812         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5813         if (pair.second > TargetNumRegisters)
5814           Selected = false;
5815       }
5816       if (Selected) {
5817         MaxVF = VFs[i].getKnownMinValue();
5818         break;
5819       }
5820     }
5821     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5822       if (MaxVF < MinVF) {
5823         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5824                           << ") with target's minimum: " << MinVF << '\n');
5825         MaxVF = MinVF;
5826       }
5827     }
5828   }
5829   return ElementCount::getFixed(MaxVF);
5830 }
5831 
5832 VectorizationFactor
5833 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5834   // FIXME: This can be fixed for scalable vectors later, because at this stage
5835   // the LoopVectorizer will only consider vectorizing a loop with scalable
5836   // vectors when the loop has a hint to enable vectorization for a given VF.
5837   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5838 
5839   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5840   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5841   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5842 
5843   unsigned Width = 1;
5844   const float ScalarCost = *ExpectedCost.getValue();
5845   float Cost = ScalarCost;
5846 
5847   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5848   if (ForceVectorization && MaxVF.isVector()) {
5849     // Ignore scalar width, because the user explicitly wants vectorization.
5850     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5851     // evaluation.
5852     Cost = std::numeric_limits<float>::max();
5853   }
5854 
5855   for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
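    //
    // For example (illustrative only), a vector body costing 20 at width 4 has
    // a per-scalar-iteration cost of 5, which beats a scalar loop cost of 8.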
5859     VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
5860     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5861     float VectorCost = *C.first.getValue() / (float)i;
5862     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5863                       << " costs: " << (int)VectorCost << ".\n");
5864     if (!C.second && !ForceVectorization) {
5865       LLVM_DEBUG(
5866           dbgs() << "LV: Not considering vector loop of width " << i
5867                  << " because it will not generate any vector instructions.\n");
5868       continue;
5869     }
5870 
5871     // If profitable add it to ProfitableVF list.
5872     if (VectorCost < ScalarCost) {
5873       ProfitableVFs.push_back(VectorizationFactor(
5874           {ElementCount::getFixed(i), (unsigned)VectorCost}));
5875     }
5876 
5877     if (VectorCost < Cost) {
5878       Cost = VectorCost;
5879       Width = i;
5880     }
5881   }
5882 
5883   if (!EnableCondStoresVectorization && NumPredStores) {
5884     reportVectorizationFailure("There are conditional stores.",
5885         "store that is conditionally executed prevents vectorization",
5886         "ConditionalStore", ORE, TheLoop);
5887     Width = 1;
5888     Cost = ScalarCost;
5889   }
5890 
5891   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5892              << "LV: Vectorization seems to be not beneficial, "
5893              << "but was forced by a user.\n");
5894   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5895   VectorizationFactor Factor = {ElementCount::getFixed(Width),
5896                                 (unsigned)(Width * Cost)};
5897   return Factor;
5898 }
5899 
5900 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5901     const Loop &L, ElementCount VF) const {
5902   // Cross iteration phis such as reductions need special handling and are
5903   // currently unsupported.
5904   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5905         return Legal->isFirstOrderRecurrence(&Phi) ||
5906                Legal->isReductionVariable(&Phi);
5907       }))
5908     return false;
5909 
5910   // Phis with uses outside of the loop require special handling and are
5911   // currently unsupported.
5912   for (auto &Entry : Legal->getInductionVars()) {
5913     // Look for uses of the value of the induction at the last iteration.
5914     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5915     for (User *U : PostInc->users())
5916       if (!L.contains(cast<Instruction>(U)))
5917         return false;
    // Look for uses of the penultimate value of the induction.
5919     for (User *U : Entry.first->users())
5920       if (!L.contains(cast<Instruction>(U)))
5921         return false;
5922   }
5923 
5924   // Induction variables that are widened require special handling that is
5925   // currently not supported.
5926   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5927         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5928                  this->isProfitableToScalarize(Entry.first, VF));
5929       }))
5930     return false;
5931 
5932   return true;
5933 }
5934 
5935 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5936     const ElementCount VF) const {
5937   // FIXME: We need a much better cost-model to take different parameters such
5938   // as register pressure, code size increase and cost of extra branches into
5939   // account. For now we apply a very crude heuristic and only consider loops
5940   // with vectorization factors larger than a certain value.
5941   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
5943   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5944     return false;
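  // EpilogueVectorizationMinVF is a cl::opt (16 by default at the time of
  // writing), so e.g. a main loop VF of 16 passes the check below while a VF
  // of 8 does not, and no epilogue vector loop would be considered.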
5945   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5946     return true;
5947   return false;
5948 }
5949 
5950 VectorizationFactor
5951 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5952     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5953   VectorizationFactor Result = VectorizationFactor::Disabled();
5954   if (!EnableEpilogueVectorization) {
5955     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5956     return Result;
5957   }
5958 
5959   if (!isScalarEpilogueAllowed()) {
5960     LLVM_DEBUG(
5961         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5962                   "allowed.\n";);
5963     return Result;
5964   }
5965 
5966   // FIXME: This can be fixed for scalable vectors later, because at this stage
5967   // the LoopVectorizer will only consider vectorizing a loop with scalable
5968   // vectors when the loop has a hint to enable vectorization for a given VF.
5969   if (MainLoopVF.isScalable()) {
5970     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5971                          "yet supported.\n");
5972     return Result;
5973   }
5974 
5975   // Not really a cost consideration, but check for unsupported cases here to
5976   // simplify the logic.
5977   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5978     LLVM_DEBUG(
5979         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5980                   "not a supported candidate.\n";);
5981     return Result;
5982   }
5983 
5984   if (EpilogueVectorizationForceVF > 1) {
5985     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5986     if (LVP.hasPlanWithVFs(
5987             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
5988       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
5989     else {
5990       LLVM_DEBUG(
5991           dbgs()
5992               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5993       return Result;
5994     }
5995   }
5996 
5997   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5998       TheLoop->getHeader()->getParent()->hasMinSize()) {
5999     LLVM_DEBUG(
6000         dbgs()
6001             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6002     return Result;
6003   }
6004 
6005   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6006     return Result;
6007 
6008   for (auto &NextVF : ProfitableVFs)
6009     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6010         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
6011         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6012       Result = NextVF;
6013 
6014   if (Result != VectorizationFactor::Disabled())
6015     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6016                       << Result.Width.getFixedValue() << "\n";);
6017   return Result;
6018 }
6019 
6020 std::pair<unsigned, unsigned>
6021 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6022   unsigned MinWidth = -1U;
6023   unsigned MaxWidth = 8;
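  // MinWidth starts at the largest unsigned value so that the first observed
  // type narrows it; MaxWidth starts at 8 bits, presumably the narrowest
  // element width worth tracking.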
6024   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6025 
6026   // For each block.
6027   for (BasicBlock *BB : TheLoop->blocks()) {
6028     // For each instruction in the loop.
6029     for (Instruction &I : BB->instructionsWithoutDebug()) {
6030       Type *T = I.getType();
6031 
6032       // Skip ignored values.
6033       if (ValuesToIgnore.count(&I))
6034         continue;
6035 
6036       // Only examine Loads, Stores and PHINodes.
6037       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6038         continue;
6039 
6040       // Examine PHI nodes that are reduction variables. Update the type to
6041       // account for the recurrence type.
6042       if (auto *PN = dyn_cast<PHINode>(&I)) {
6043         if (!Legal->isReductionVariable(PN))
6044           continue;
6045         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6046         if (PreferInLoopReductions ||
6047             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6048                                       RdxDesc.getRecurrenceType(),
6049                                       TargetTransformInfo::ReductionFlags()))
6050           continue;
6051         T = RdxDesc.getRecurrenceType();
6052       }
6053 
6054       // Examine the stored values.
6055       if (auto *ST = dyn_cast<StoreInst>(&I))
6056         T = ST->getValueOperand()->getType();
6057 
6058       // Ignore loaded pointer types and stored pointer types that are not
6059       // vectorizable.
6060       //
6061       // FIXME: The check here attempts to predict whether a load or store will
6062       //        be vectorized. We only know this for certain after a VF has
6063       //        been selected. Here, we assume that if an access can be
6064       //        vectorized, it will be. We should also look at extending this
6065       //        optimization to non-pointer types.
6066       //
6067       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6068           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6069         continue;
6070 
6071       MinWidth = std::min(MinWidth,
6072                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6073       MaxWidth = std::max(MaxWidth,
6074                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6075     }
6076   }
6077 
6078   return {MinWidth, MaxWidth};
6079 }
6080 
6081 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6082                                                            unsigned LoopCost) {
6083   // -- The interleave heuristics --
6084   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6085   // There are many micro-architectural considerations that we can't predict
6086   // at this level. For example, frontend pressure (on decode or fetch) due to
6087   // code size, or the number and capabilities of the execution ports.
6088   //
6089   // We use the following heuristics to select the interleave count:
6090   // 1. If the code has reductions, then we interleave to break the cross
6091   // iteration dependency.
6092   // 2. If the loop is really small, then we interleave to reduce the loop
6093   // overhead.
6094   // 3. We don't interleave if we think that we will spill registers to memory
6095   // due to the increased register pressure.
6096 
6097   if (!isScalarEpilogueAllowed())
6098     return 1;
6099 
  // If a finite maximum safe dependence distance was found, it already
  // constrains how many iterations can safely execute together, so be
  // conservative and do not interleave.
6101   if (Legal->getMaxSafeDepDistBytes() != -1U)
6102     return 1;
6103 
6104   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6105   const bool HasReductions = !Legal->getReductionVars().empty();
6106   // Do not interleave loops with a relatively small known or estimated trip
6107   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6109   // because with the above conditions interleaving can expose ILP and break
6110   // cross iteration dependences for reductions.
6111   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6112       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6113     return 1;
6114 
6115   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so assume that each register class has
  // at least one local user (clamp to 1 to avoid dividing by zero).
6118   for (auto& pair : R.MaxLocalUsers) {
6119     pair.second = std::max(pair.second, 1U);
6120   }
6121 
6122   // We calculate the interleave count using the following formula.
6123   // Subtract the number of loop invariants from the number of available
6124   // registers. These registers are used by all of the interleaved instances.
6125   // Next, divide the remaining registers by the number of registers that is
6126   // required by the loop, in order to estimate how many parallel instances
6127   // fit without causing spills. All of this is rounded down if necessary to be
6128   // a power of two. We want power of two interleave count to simplify any
6129   // addressing operations or alignment considerations.
6130   // We also want power of two interleave counts to ensure that the induction
6131   // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when optimizing for size, in which case we have
  // already returned an interleave count of 1 above.
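  //
  // As a purely illustrative example: with 32 vector registers, 2 of them
  // holding loop-invariant values and 6 live values per interleaved instance,
  // the estimate is PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4.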
6133   unsigned IC = UINT_MAX;
6134 
6135   for (auto& pair : R.MaxLocalUsers) {
6136     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6137     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6138                       << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6140     if (VF.isScalar()) {
6141       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6142         TargetNumRegisters = ForceTargetNumScalarRegs;
6143     } else {
6144       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6145         TargetNumRegisters = ForceTargetNumVectorRegs;
6146     }
6147     unsigned MaxLocalUsers = pair.second;
6148     unsigned LoopInvariantRegs = 0;
6149     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6150       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6151 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6153     // Don't count the induction variable as interleaved.
6154     if (EnableIndVarRegisterHeur) {
6155       TmpIC =
6156           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6157                         std::max(1U, (MaxLocalUsers - 1)));
6158     }
6159 
6160     IC = std::min(IC, TmpIC);
6161   }
6162 
6163   // Clamp the interleave ranges to reasonable counts.
6164   unsigned MaxInterleaveCount =
6165       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6166 
6167   // Check if the user has overridden the max.
6168   if (VF.isScalar()) {
6169     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6170       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6171   } else {
6172     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6173       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6174   }
6175 
  // If the trip count is a known or estimated compile-time constant, limit the
6177   // interleave count to be less than the trip count divided by VF, provided it
6178   // is at least 1.
6179   //
6180   // For scalable vectors we can't know if interleaving is beneficial. It may
6181   // not be beneficial for small loops if none of the lanes in the second vector
  // iteration is enabled. However, for larger loops, there is likely to be a
6183   // similar benefit as for fixed-width vectors. For now, we choose to leave
6184   // the InterleaveCount as if vscale is '1', although if some information about
6185   // the vector is known (e.g. min vector size), we can make a better decision.
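  //
  // For example, with an estimated trip count of 12 and a VF of 4, the
  // interleave count is capped at 12 / 4 = 3 so that the vector loop body
  // still executes at least once.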
6186   if (BestKnownTC) {
6187     MaxInterleaveCount =
6188         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6189     // Make sure MaxInterleaveCount is greater than 0.
6190     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6191   }
6192 
6193   assert(MaxInterleaveCount > 0 &&
6194          "Maximum interleave count must be greater than 0");
6195 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6198   if (IC > MaxInterleaveCount)
6199     IC = MaxInterleaveCount;
6200   else
6201     // Make sure IC is greater than 0.
6202     IC = std::max(1u, IC);
6203 
6204   assert(IC > 0 && "Interleave count must be greater than 0.");
6205 
6206   // If we did not calculate the cost for VF (because the user selected the VF)
6207   // then we calculate the cost of VF here.
6208   if (LoopCost == 0) {
6209     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6210     LoopCost = *expectedCost(VF).first.getValue();
6211   }
6212 
6213   assert(LoopCost && "Non-zero loop cost expected");
6214 
6215   // Interleave if we vectorized this loop and there is a reduction that could
6216   // benefit from interleaving.
6217   if (VF.isVector() && HasReductions) {
6218     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6219     return IC;
6220   }
6221 
6222   // Note that if we've already vectorized the loop we will have done the
6223   // runtime check and so interleaving won't require further checks.
6224   bool InterleavingRequiresRuntimePointerCheck =
6225       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6226 
6227   // We want to interleave small loops in order to reduce the loop overhead and
6228   // potentially expose ILP opportunities.
6229   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6230                     << "LV: IC is " << IC << '\n'
6231                     << "LV: VF is " << VF << '\n');
6232   const bool AggressivelyInterleaveReductions =
6233       TTI.enableAggressiveInterleaving(HasReductions);
6234   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6235     // We assume that the cost overhead is 1 and we use the cost model
6236     // to estimate the cost of the loop and interleave until the cost of the
6237     // loop overhead is about 5% of the cost of the loop.
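    // For instance, assuming the default SmallLoopCost of 20, a loop with an
    // estimated cost of 5 yields PowerOf2Floor(20 / 5) = 4 before clamping
    // against IC.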
6238     unsigned SmallIC =
6239         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6240 
6241     // Interleave until store/load ports (estimated by max interleave count) are
6242     // saturated.
6243     unsigned NumStores = Legal->getNumStores();
6244     unsigned NumLoads = Legal->getNumLoads();
6245     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6246     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6247 
6248     // If we have a scalar reduction (vector reductions are already dealt with
6249     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit this, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
6252     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6253       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6254       SmallIC = std::min(SmallIC, F);
6255       StoresIC = std::min(StoresIC, F);
6256       LoadsIC = std::min(LoadsIC, F);
6257     }
6258 
6259     if (EnableLoadStoreRuntimeInterleave &&
6260         std::max(StoresIC, LoadsIC) > SmallIC) {
6261       LLVM_DEBUG(
6262           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6263       return std::max(StoresIC, LoadsIC);
6264     }
6265 
6266     // If there are scalar reductions and TTI has enabled aggressive
6267     // interleaving for reductions, we will interleave to expose ILP.
6268     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6269         AggressivelyInterleaveReductions) {
6270       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6271       // Interleave no less than SmallIC but not as aggressive as the normal IC
6272       // to satisfy the rare situation when resources are too limited.
6273       return std::max(IC / 2, SmallIC);
6274     } else {
6275       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6276       return SmallIC;
6277     }
6278   }
6279 
6280   // Interleave if this is a large loop (small loops are already dealt with by
6281   // this point) that could benefit from interleaving.
6282   if (AggressivelyInterleaveReductions) {
6283     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6284     return IC;
6285   }
6286 
6287   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6288   return 1;
6289 }
6290 
6291 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6292 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6293   // This function calculates the register usage by measuring the highest number
6294   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
6296   // assign a number to each instruction. We use RPO to ensure that defs are
6297   // met before their users. We assume that each instruction that has in-loop
6298   // users starts an interval. We record every time that an in-loop value is
6299   // used, so we have a list of the first and last occurrences of each
6300   // instruction. Next, we transpose this data structure into a multi map that
6301   // holds the list of intervals that *end* at a specific location. This multi
6302   // map allows us to perform a linear search. We scan the instructions linearly
6303   // and record each time that a new interval starts, by placing it in a set.
6304   // If we find this value in the multi-map then we remove it from the set.
6305   // The max register usage is the maximum size of the set.
6306   // We also search for instructions that are defined outside the loop, but are
6307   // used inside the loop. We need this number separately from the max-interval
6308   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
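  //
  // As a small illustration, for a loop body such as
  //   %a = load ...        ; index 0
  //   %b = load ...        ; index 1
  //   %c = add %a, %b      ; index 2
  //   store %c, ...        ; index 3
  // the intervals of %a and %b both end at the add, so at most two in-loop
  // values are live at once and the estimated usage is two registers (any
  // loop-invariant operands are counted separately).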
6310   LoopBlocksDFS DFS(TheLoop);
6311   DFS.perform(LI);
6312 
6313   RegisterUsage RU;
6314 
6315   // Each 'key' in the map opens a new interval. The values
6316   // of the map are the index of the 'last seen' usage of the
6317   // instruction that is the key.
6318   using IntervalMap = DenseMap<Instruction *, unsigned>;
6319 
6320   // Maps instruction to its index.
6321   SmallVector<Instruction *, 64> IdxToInstr;
6322   // Marks the end of each interval.
6323   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6325   SmallPtrSet<Instruction *, 8> Ends;
6326   // Saves the list of values that are used in the loop but are
6327   // defined outside the loop, such as arguments and constants.
6328   SmallPtrSet<Value *, 8> LoopInvariants;
6329 
6330   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6331     for (Instruction &I : BB->instructionsWithoutDebug()) {
6332       IdxToInstr.push_back(&I);
6333 
6334       // Save the end location of each USE.
6335       for (Value *U : I.operands()) {
6336         auto *Instr = dyn_cast<Instruction>(U);
6337 
6338         // Ignore non-instruction values such as arguments, constants, etc.
6339         if (!Instr)
6340           continue;
6341 
6342         // If this instruction is outside the loop then record it and continue.
6343         if (!TheLoop->contains(Instr)) {
6344           LoopInvariants.insert(Instr);
6345           continue;
6346         }
6347 
6348         // Overwrite previous end points.
6349         EndPoint[Instr] = IdxToInstr.size();
6350         Ends.insert(Instr);
6351       }
6352     }
6353   }
6354 
6355   // Saves the list of intervals that end with the index in 'key'.
6356   using InstrList = SmallVector<Instruction *, 2>;
6357   DenseMap<unsigned, InstrList> TransposeEnds;
6358 
6359   // Transpose the EndPoints to a list of values that end at each index.
6360   for (auto &Interval : EndPoint)
6361     TransposeEnds[Interval.second].push_back(Interval.first);
6362 
6363   SmallPtrSet<Instruction *, 8> OpenIntervals;
6364   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6365   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6366 
6367   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6368 
6369   // A lambda that gets the register usage for the given type and VF.
6370   const auto &TTICapture = TTI;
6371   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6372     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6373       return 0U;
6374     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6375   };
6376 
6377   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6378     Instruction *I = IdxToInstr[i];
6379 
6380     // Remove all of the instructions that end at this location.
6381     InstrList &List = TransposeEnds[i];
6382     for (Instruction *ToRemove : List)
6383       OpenIntervals.erase(ToRemove);
6384 
6385     // Ignore instructions that are never used within the loop.
6386     if (!Ends.count(I))
6387       continue;
6388 
6389     // Skip ignored values.
6390     if (ValuesToIgnore.count(I))
6391       continue;
6392 
6393     // For each VF find the maximum usage of registers.
6394     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6395       // Count the number of live intervals.
6396       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6397 
6398       if (VFs[j].isScalar()) {
6399         for (auto Inst : OpenIntervals) {
6400           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6401           if (RegUsage.find(ClassID) == RegUsage.end())
6402             RegUsage[ClassID] = 1;
6403           else
6404             RegUsage[ClassID] += 1;
6405         }
6406       } else {
6407         collectUniformsAndScalars(VFs[j]);
6408         for (auto Inst : OpenIntervals) {
6409           // Skip ignored values for VF > 1.
6410           if (VecValuesToIgnore.count(Inst))
6411             continue;
6412           if (isScalarAfterVectorization(Inst, VFs[j])) {
6413             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6414             if (RegUsage.find(ClassID) == RegUsage.end())
6415               RegUsage[ClassID] = 1;
6416             else
6417               RegUsage[ClassID] += 1;
6418           } else {
6419             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6420             if (RegUsage.find(ClassID) == RegUsage.end())
6421               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6422             else
6423               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6424           }
6425         }
6426       }
6427 
6428       for (auto& pair : RegUsage) {
6429         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6430           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6431         else
6432           MaxUsages[j][pair.first] = pair.second;
6433       }
6434     }
6435 
6436     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6437                       << OpenIntervals.size() << '\n');
6438 
6439     // Add the current instruction to the list of open intervals.
6440     OpenIntervals.insert(I);
6441   }
6442 
6443   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6444     SmallMapVector<unsigned, unsigned, 4> Invariant;
6445 
6446     for (auto Inst : LoopInvariants) {
6447       unsigned Usage =
6448           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6449       unsigned ClassID =
6450           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6451       if (Invariant.find(ClassID) == Invariant.end())
6452         Invariant[ClassID] = Usage;
6453       else
6454         Invariant[ClassID] += Usage;
6455     }
6456 
6457     LLVM_DEBUG({
6458       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6459       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6460              << " item\n";
6461       for (const auto &pair : MaxUsages[i]) {
6462         dbgs() << "LV(REG): RegisterClass: "
6463                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6464                << " registers\n";
6465       }
6466       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6467              << " item\n";
6468       for (const auto &pair : Invariant) {
6469         dbgs() << "LV(REG): RegisterClass: "
6470                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6471                << " registers\n";
6472       }
6473     });
6474 
6475     RU.LoopInvariantRegs = Invariant;
6476     RU.MaxLocalUsers = MaxUsages[i];
6477     RUs[i] = RU;
6478   }
6479 
6480   return RUs;
6481 }
6482 
6483 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
6484   // TODO: Cost model for emulated masked load/store is completely
6485   // broken. This hack guides the cost model to use an artificially
6486   // high enough value to practically disable vectorization with such
6487   // operations, except where previously deployed legality hack allowed
6488   // using very low cost values. This is to avoid regressions coming simply
6489   // from moving "masked load/store" check from legality to cost model.
6490   // Masked Load/Gather emulation was previously never allowed.
  // Only a limited amount of Masked Store/Scatter emulation was allowed.
6492   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6493   return isa<LoadInst>(I) ||
6494          (isa<StoreInst>(I) &&
6495           NumPredStores > NumberOfStoresToPredicate);
6496 }
6497 
6498 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6499   // If we aren't vectorizing the loop, or if we've already collected the
6500   // instructions to scalarize, there's nothing to do. Collection may already
6501   // have occurred if we have a user-selected VF and are now computing the
6502   // expected cost for interleaving.
6503   if (VF.isScalar() || VF.isZero() ||
6504       InstsToScalarize.find(VF) != InstsToScalarize.end())
6505     return;
6506 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6508   // not profitable to scalarize any instructions, the presence of VF in the
6509   // map will indicate that we've analyzed it already.
6510   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6511 
6512   // Find all the instructions that are scalar with predication in the loop and
6513   // determine if it would be better to not if-convert the blocks they are in.
6514   // If so, we also record the instructions to scalarize.
6515   for (BasicBlock *BB : TheLoop->blocks()) {
6516     if (!blockNeedsPredication(BB))
6517       continue;
6518     for (Instruction &I : *BB)
6519       if (isScalarWithPredication(&I)) {
6520         ScalarCostsTy ScalarCosts;
6521         // Do not apply discount logic if hacked cost is needed
6522         // for emulated masked memrefs.
6523         if (!useEmulatedMaskMemRefHack(&I) &&
6524             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6525           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6526         // Remember that BB will remain after vectorization.
6527         PredicatedBBsAfterVectorization.insert(BB);
6528       }
6529   }
6530 }
6531 
6532 int LoopVectorizationCostModel::computePredInstDiscount(
6533     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6534   assert(!isUniformAfterVectorization(PredInst, VF) &&
6535          "Instruction marked uniform-after-vectorization will be predicated");
6536 
6537   // Initialize the discount to zero, meaning that the scalar version and the
6538   // vector version cost the same.
6539   InstructionCost Discount = 0;
6540 
6541   // Holds instructions to analyze. The instructions we visit are mapped in
6542   // ScalarCosts. Those instructions are the ones that would be scalarized if
6543   // we find that the scalar version costs less.
6544   SmallVector<Instruction *, 8> Worklist;
6545 
6546   // Returns true if the given instruction can be scalarized.
6547   auto canBeScalarized = [&](Instruction *I) -> bool {
6548     // We only attempt to scalarize instructions forming a single-use chain
6549     // from the original predicated block that would otherwise be vectorized.
6550     // Although not strictly necessary, we give up on instructions we know will
6551     // already be scalar to avoid traversing chains that are unlikely to be
6552     // beneficial.
6553     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6554         isScalarAfterVectorization(I, VF))
6555       return false;
6556 
6557     // If the instruction is scalar with predication, it will be analyzed
6558     // separately. We ignore it within the context of PredInst.
6559     if (isScalarWithPredication(I))
6560       return false;
6561 
6562     // If any of the instruction's operands are uniform after vectorization,
6563     // the instruction cannot be scalarized. This prevents, for example, a
6564     // masked load from being scalarized.
6565     //
6566     // We assume we will only emit a value for lane zero of an instruction
6567     // marked uniform after vectorization, rather than VF identical values.
6568     // Thus, if we scalarize an instruction that uses a uniform, we would
6569     // create uses of values corresponding to the lanes we aren't emitting code
6570     // for. This behavior can be changed by allowing getScalarValue to clone
6571     // the lane zero values for uniforms rather than asserting.
6572     for (Use &U : I->operands())
6573       if (auto *J = dyn_cast<Instruction>(U.get()))
6574         if (isUniformAfterVectorization(J, VF))
6575           return false;
6576 
6577     // Otherwise, we can scalarize the instruction.
6578     return true;
6579   };
6580 
6581   // Compute the expected cost discount from scalarizing the entire expression
6582   // feeding the predicated instruction. We currently only consider expressions
6583   // that are single-use instruction chains.
6584   Worklist.push_back(PredInst);
6585   while (!Worklist.empty()) {
6586     Instruction *I = Worklist.pop_back_val();
6587 
6588     // If we've already analyzed the instruction, there's nothing to do.
6589     if (ScalarCosts.find(I) != ScalarCosts.end())
6590       continue;
6591 
6592     // Compute the cost of the vector instruction. Note that this cost already
6593     // includes the scalarization overhead of the predicated instruction.
6594     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6595 
6596     // Compute the cost of the scalarized instruction. This cost is the cost of
6597     // the instruction as if it wasn't if-converted and instead remained in the
6598     // predicated block. We will scale this cost by block probability after
6599     // computing the scalarization overhead.
6600     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6601     InstructionCost ScalarCost =
6602         VF.getKnownMinValue() *
6603         getInstructionCost(I, ElementCount::getFixed(1)).first;
6604 
6605     // Compute the scalarization overhead of needed insertelement instructions
6606     // and phi nodes.
6607     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6608       ScalarCost += TTI.getScalarizationOverhead(
6609           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6610           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6611       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6612       ScalarCost +=
6613           VF.getKnownMinValue() *
6614           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6615     }
6616 
6617     // Compute the scalarization overhead of needed extractelement
6618     // instructions. For each of the instruction's operands, if the operand can
6619     // be scalarized, add it to the worklist; otherwise, account for the
6620     // overhead.
6621     for (Use &U : I->operands())
6622       if (auto *J = dyn_cast<Instruction>(U.get())) {
6623         assert(VectorType::isValidElementType(J->getType()) &&
6624                "Instruction has non-scalar type");
6625         if (canBeScalarized(J))
6626           Worklist.push_back(J);
6627         else if (needsExtract(J, VF)) {
6628           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6629           ScalarCost += TTI.getScalarizationOverhead(
6630               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6631               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6632         }
6633       }
6634 
6635     // Scale the total scalar cost by block probability.
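    // (getReciprocalPredBlockProb currently returns 2, i.e. the predicated
    // block is assumed to execute on roughly half of the iterations, so a
    // scalar cost of 10 becomes 5.)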
6636     ScalarCost /= getReciprocalPredBlockProb();
6637 
6638     // Compute the discount. A non-negative discount means the vector version
6639     // of the instruction costs more, and scalarizing would be beneficial.
6640     Discount += VectorCost - ScalarCost;
6641     ScalarCosts[I] = ScalarCost;
6642   }
6643 
6644   return *Discount.getValue();
6645 }
6646 
6647 LoopVectorizationCostModel::VectorizationCostTy
6648 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6649   VectorizationCostTy Cost;
6650 
6651   // For each block.
6652   for (BasicBlock *BB : TheLoop->blocks()) {
6653     VectorizationCostTy BlockCost;
6654 
6655     // For each instruction in the old loop.
6656     for (Instruction &I : BB->instructionsWithoutDebug()) {
6657       // Skip ignored values.
6658       if (ValuesToIgnore.count(&I) ||
6659           (VF.isVector() && VecValuesToIgnore.count(&I)))
6660         continue;
6661 
6662       VectorizationCostTy C = getInstructionCost(&I, VF);
6663 
6664       // Check if we should override the cost.
6665       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6666         C.first = InstructionCost(ForceTargetInstructionCost);
6667 
6668       BlockCost.first += C.first;
6669       BlockCost.second |= C.second;
6670       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6671                         << " for VF " << VF << " For instruction: " << I
6672                         << '\n');
6673     }
6674 
6675     // If we are vectorizing a predicated block, it will have been
6676     // if-converted. This means that the block's instructions (aside from
6677     // stores and instructions that may divide by zero) will now be
6678     // unconditionally executed. For the scalar case, we may not always execute
6679     // the predicated block, if it is an if-else block. Thus, scale the block's
6680     // cost by the probability of executing it. blockNeedsPredication from
6681     // Legal is used so as to not include all blocks in tail folded loops.
6682     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6683       BlockCost.first /= getReciprocalPredBlockProb();
6684 
6685     Cost.first += BlockCost.first;
6686     Cost.second |= BlockCost.second;
6687   }
6688 
6689   return Cost;
6690 }
6691 
6692 /// Gets Address Access SCEV after verifying that the access pattern
6693 /// is loop invariant except the induction variable dependence.
6694 ///
6695 /// This SCEV can be sent to the Target in order to estimate the address
6696 /// calculation cost.
6697 static const SCEV *getAddressAccessSCEV(
6698               Value *Ptr,
6699               LoopVectorizationLegality *Legal,
6700               PredicatedScalarEvolution &PSE,
6701               const Loop *TheLoop) {
6702 
6703   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6704   if (!Gep)
6705     return nullptr;
6706 
6707   // We are looking for a gep with all loop invariant indices except for one
6708   // which should be an induction variable.
6709   auto SE = PSE.getSE();
6710   unsigned NumOperands = Gep->getNumOperands();
6711   for (unsigned i = 1; i < NumOperands; ++i) {
6712     Value *Opd = Gep->getOperand(i);
6713     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6714         !Legal->isInductionVariable(Opd))
6715       return nullptr;
6716   }
6717 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6719   return PSE.getSCEV(Ptr);
6720 }
6721 
6722 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6723   return Legal->hasStride(I->getOperand(0)) ||
6724          Legal->hasStride(I->getOperand(1));
6725 }
6726 
6727 InstructionCost
6728 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6729                                                         ElementCount VF) {
6730   assert(VF.isVector() &&
6731          "Scalarization cost of instruction implies vectorization.");
6732   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6733   Type *ValTy = getMemInstValueType(I);
6734   auto SE = PSE.getSE();
6735 
6736   unsigned AS = getLoadStoreAddressSpace(I);
6737   Value *Ptr = getLoadStorePointerOperand(I);
6738   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6739 
6740   // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6742   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6743 
6744   // Get the cost of the scalar memory instruction and address computation.
6745   InstructionCost Cost =
6746       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6747 
6748   // Don't pass *I here, since it is scalar but will actually be part of a
6749   // vectorized loop where the user of it is a vectorized instruction.
6750   const Align Alignment = getLoadStoreAlignment(I);
6751   Cost += VF.getKnownMinValue() *
6752           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6753                               AS, TTI::TCK_RecipThroughput);
6754 
6755   // Get the overhead of the extractelement and insertelement instructions
6756   // we might create due to scalarization.
6757   Cost += getScalarizationOverhead(I, VF);
6758 
6759   // If we have a predicated store, it may not be executed for each vector
6760   // lane. Scale the cost by the probability of executing the predicated
6761   // block.
6762   if (isPredicatedInst(I)) {
6763     Cost /= getReciprocalPredBlockProb();
6764 
6765     if (useEmulatedMaskMemRefHack(I))
6766       // Artificially setting to a high enough value to practically disable
6767       // vectorization with such operations.
6768       Cost = 3000000;
6769   }
6770 
6771   return Cost;
6772 }
6773 
6774 InstructionCost
6775 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6776                                                     ElementCount VF) {
6777   Type *ValTy = getMemInstValueType(I);
6778   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6779   Value *Ptr = getLoadStorePointerOperand(I);
6780   unsigned AS = getLoadStoreAddressSpace(I);
6781   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6782   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6783 
6784   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6785          "Stride should be 1 or -1 for consecutive memory access");
6786   const Align Alignment = getLoadStoreAlignment(I);
6787   InstructionCost Cost = 0;
6788   if (Legal->isMaskRequired(I))
6789     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6790                                       CostKind);
6791   else
6792     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6793                                 CostKind, I);
6794 
6795   bool Reverse = ConsecutiveStride < 0;
6796   if (Reverse)
6797     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6798   return Cost;
6799 }
6800 
6801 InstructionCost
6802 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6803                                                 ElementCount VF) {
6804   assert(Legal->isUniformMemOp(*I));
6805 
6806   Type *ValTy = getMemInstValueType(I);
6807   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6808   const Align Alignment = getLoadStoreAlignment(I);
6809   unsigned AS = getLoadStoreAddressSpace(I);
6810   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6811   if (isa<LoadInst>(I)) {
6812     return TTI.getAddressComputationCost(ValTy) +
6813            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6814                                CostKind) +
6815            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6816   }
6817   StoreInst *SI = cast<StoreInst>(I);
6818 
6819   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6820   return TTI.getAddressComputationCost(ValTy) +
6821          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6822                              CostKind) +
6823          (isLoopInvariantStoreValue
6824               ? 0
6825               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6826                                        VF.getKnownMinValue() - 1));
6827 }
6828 
6829 InstructionCost
6830 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6831                                                  ElementCount VF) {
6832   Type *ValTy = getMemInstValueType(I);
6833   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6834   const Align Alignment = getLoadStoreAlignment(I);
6835   const Value *Ptr = getLoadStorePointerOperand(I);
6836 
6837   return TTI.getAddressComputationCost(VectorTy) +
6838          TTI.getGatherScatterOpCost(
6839              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6840              TargetTransformInfo::TCK_RecipThroughput, I);
6841 }
6842 
6843 InstructionCost
6844 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6845                                                    ElementCount VF) {
6846   // TODO: Once we have support for interleaving with scalable vectors
6847   // we can calculate the cost properly here.
6848   if (VF.isScalable())
6849     return InstructionCost::getInvalid();
6850 
6851   Type *ValTy = getMemInstValueType(I);
6852   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6853   unsigned AS = getLoadStoreAddressSpace(I);
6854 
6855   auto Group = getInterleavedAccessGroup(I);
6856   assert(Group && "Fail to get an interleaved access group.");
6857 
6858   unsigned InterleaveFactor = Group->getFactor();
6859   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6860 
6861   // Holds the indices of existing members in an interleaved load group.
6862   // An interleaved store group doesn't need this as it doesn't allow gaps.
6863   SmallVector<unsigned, 4> Indices;
6864   if (isa<LoadInst>(I)) {
6865     for (unsigned i = 0; i < InterleaveFactor; i++)
6866       if (Group->getMember(i))
6867         Indices.push_back(i);
6868   }
6869 
6870   // Calculate the cost of the whole interleaved group.
6871   bool UseMaskForGaps =
6872       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6873   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6874       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6875       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6876 
6877   if (Group->isReverse()) {
6878     // TODO: Add support for reversed masked interleaved access.
6879     assert(!Legal->isMaskRequired(I) &&
6880            "Reverse masked interleaved access not supported.");
6881     Cost += Group->getNumMembers() *
6882             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6883   }
6884   return Cost;
6885 }
6886 
6887 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6888     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6889   // Early exit for no inloop reductions
6890   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6891     return InstructionCost::getInvalid();
6892   auto *VectorTy = cast<VectorType>(Ty);
6893 
  // We are looking for one of the following patterns, choosing the minimal
  // acceptable cost for it:
6895   //  reduce(mul(ext(A), ext(B))) or
6896   //  reduce(mul(A, B)) or
6897   //  reduce(ext(A)) or
6898   //  reduce(A).
6899   // The basic idea is that we walk down the tree to do that, finding the root
6900   // reduction instruction in InLoopReductionImmediateChains. From there we find
6901   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6902   // of the components. If the reduction cost is lower then we return it for the
6903   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying the original cost method
6905   // should be used.
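  //
  // For instance (illustrative IR only), for a chain such as
  //   %e = sext <8 x i16> %x to <8 x i32>
  //   %m = mul <8 x i32> %e, %f
  //   %r = add <8 x i32> %m, %phi
  // some targets provide a single extended multiply-accumulate reduction; if
  // that is cheaper, its cost is returned for the add and the ext/mul are
  // costed as 0.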
6906   Instruction *RetI = I;
6907   if ((RetI->getOpcode() == Instruction::SExt ||
6908        RetI->getOpcode() == Instruction::ZExt)) {
6909     if (!RetI->hasOneUser())
6910       return InstructionCost::getInvalid();
6911     RetI = RetI->user_back();
6912   }
6913   if (RetI->getOpcode() == Instruction::Mul &&
6914       RetI->user_back()->getOpcode() == Instruction::Add) {
6915     if (!RetI->hasOneUser())
6916       return InstructionCost::getInvalid();
6917     RetI = RetI->user_back();
6918   }
6919 
6920   // Test if the found instruction is a reduction, and if not return an invalid
6921   // cost specifying the parent to use the original cost modelling.
6922   if (!InLoopReductionImmediateChains.count(RetI))
6923     return InstructionCost::getInvalid();
6924 
6925   // Find the reduction this chain is a part of and calculate the basic cost of
6926   // the reduction on its own.
6927   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6928   Instruction *ReductionPhi = LastChain;
6929   while (!isa<PHINode>(ReductionPhi))
6930     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6931 
6932   RecurrenceDescriptor RdxDesc =
6933       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6934   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6935                                                      VectorTy, false, CostKind);
6936 
6937   // Get the operand that was not the reduction chain and match it to one of the
6938   // patterns, returning the better cost if it is found.
6939   Instruction *RedOp = RetI->getOperand(1) == LastChain
6940                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6941                            : dyn_cast<Instruction>(RetI->getOperand(1));
6942 
6943   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6944 
6945   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
6946       !TheLoop->isLoopInvariant(RedOp)) {
6947     bool IsUnsigned = isa<ZExtInst>(RedOp);
6948     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6949     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6950         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6951         CostKind);
6952 
6953     unsigned ExtCost =
6954         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6955                              TTI::CastContextHint::None, CostKind, RedOp);
6956     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6957       return I == RetI ? *RedCost.getValue() : 0;
6958   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
6959     Instruction *Mul = RedOp;
6960     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
6961     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
6962     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
6963         Op0->getOpcode() == Op1->getOpcode() &&
6964         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6965         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6966       bool IsUnsigned = isa<ZExtInst>(Op0);
6967       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6968       // reduce(mul(ext, ext))
6969       unsigned ExtCost =
6970           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
6971                                TTI::CastContextHint::None, CostKind, Op0);
6972       unsigned MulCost =
6973           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6974 
6975       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6976           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6977           CostKind);
6978 
6979       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
6980         return I == RetI ? *RedCost.getValue() : 0;
6981     } else {
6982       unsigned MulCost =
6983           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6984 
6985       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6986           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6987           CostKind);
6988 
6989       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6990         return I == RetI ? *RedCost.getValue() : 0;
6991     }
6992   }
6993 
6994   return I == RetI ? BaseCost : InstructionCost::getInvalid();
6995 }
6996 
6997 InstructionCost
6998 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6999                                                      ElementCount VF) {
7000   // Calculate scalar cost only. Vectorization cost should be ready at this
7001   // moment.
7002   if (VF.isScalar()) {
7003     Type *ValTy = getMemInstValueType(I);
7004     const Align Alignment = getLoadStoreAlignment(I);
7005     unsigned AS = getLoadStoreAddressSpace(I);
7006 
7007     return TTI.getAddressComputationCost(ValTy) +
7008            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7009                                TTI::TCK_RecipThroughput, I);
7010   }
7011   return getWideningCost(I, VF);
7012 }
7013 
7014 LoopVectorizationCostModel::VectorizationCostTy
7015 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7016                                                ElementCount VF) {
7017   // If we know that this instruction will remain uniform, check the cost of
7018   // the scalar version.
7019   if (isUniformAfterVectorization(I, VF))
7020     VF = ElementCount::getFixed(1);
7021 
7022   if (VF.isVector() && isProfitableToScalarize(I, VF))
7023     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7024 
7025   // Forced scalars do not have any scalarization overhead.
7026   auto ForcedScalar = ForcedScalars.find(VF);
7027   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7028     auto InstSet = ForcedScalar->second;
7029     if (InstSet.count(I))
7030       return VectorizationCostTy(
7031           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7032            VF.getKnownMinValue()),
7033           false);
7034   }
7035 
7036   Type *VectorTy;
7037   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7038 
7039   bool TypeNotScalarized =
7040       VF.isVector() && VectorTy->isVectorTy() &&
7041       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
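  // E.g. a <4 x i64> on a target with 128-bit vector registers legalizes into
  // 2 parts, which is fewer than the 4 lanes, so the type is considered
  // vectorized; if the number of parts reached the lane count, the operation
  // would effectively be scalarized.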
7042   return VectorizationCostTy(C, TypeNotScalarized);
7043 }
7044 
7045 InstructionCost
7046 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7047                                                      ElementCount VF) {
7048 
7049   assert(!VF.isScalable() &&
7050          "cannot compute scalarization overhead for scalable vectorization");
7051   if (VF.isScalar())
7052     return 0;
7053 
7054   InstructionCost Cost = 0;
7055   Type *RetTy = ToVectorTy(I->getType(), VF);
7056   if (!RetTy->isVoidTy() &&
7057       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7058     Cost += TTI.getScalarizationOverhead(
7059         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7060         true, false);
7061 
7062   // Some targets keep addresses scalar.
7063   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7064     return Cost;
7065 
7066   // Some targets support efficient element stores.
7067   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7068     return Cost;
7069 
7070   // Collect operands to consider.
7071   CallInst *CI = dyn_cast<CallInst>(I);
7072   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7073 
7074   // Skip operands that do not require extraction/scalarization and do not incur
7075   // any overhead.
7076   return Cost + TTI.getOperandsScalarizationOverhead(
7077                     filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
7078 }
7079 
7080 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7081   if (VF.isScalar())
7082     return;
7083   NumPredStores = 0;
7084   for (BasicBlock *BB : TheLoop->blocks()) {
7085     // For each instruction in the old loop.
7086     for (Instruction &I : *BB) {
7087       Value *Ptr =  getLoadStorePointerOperand(&I);
7088       if (!Ptr)
7089         continue;
7090 
7091       // TODO: We should generate better code and update the cost model for
7092       // predicated uniform stores. Today they are treated as any other
7093       // predicated store (see added test cases in
7094       // invariant-store-vectorization.ll).
7095       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7096         NumPredStores++;
7097 
7098       if (Legal->isUniformMemOp(I)) {
7099         // TODO: Avoid replicating loads and stores instead of
7100         // relying on instcombine to remove them.
7101         // Load: Scalar load + broadcast
7102         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7103         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7104         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7105         continue;
7106       }
7107 
7108       // We assume that widening is the best solution when possible.
7109       if (memoryInstructionCanBeWidened(&I, VF)) {
7110         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7111         int ConsecutiveStride =
7112                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7113         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7114                "Expected consecutive stride.");
7115         InstWidening Decision =
7116             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7117         setWideningDecision(&I, VF, Decision, Cost);
7118         continue;
7119       }
7120 
7121       // Choose between Interleaving, Gather/Scatter or Scalarization.
7122       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7123       unsigned NumAccesses = 1;
7124       if (isAccessInterleaved(&I)) {
7125         auto Group = getInterleavedAccessGroup(&I);
7126         assert(Group && "Fail to get an interleaved access group.");
7127 
7128         // Make one decision for the whole group.
7129         if (getWideningDecision(&I, VF) != CM_Unknown)
7130           continue;
7131 
7132         NumAccesses = Group->getNumMembers();
7133         if (interleavedAccessCanBeWidened(&I, VF))
7134           InterleaveCost = getInterleaveGroupCost(&I, VF);
7135       }
7136 
7137       InstructionCost GatherScatterCost =
7138           isLegalGatherOrScatter(&I)
7139               ? getGatherScatterCost(&I, VF) * NumAccesses
7140               : InstructionCost::getInvalid();
7141 
7142       InstructionCost ScalarizationCost =
7143           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
7144                            : InstructionCost::getInvalid();
7145 
      // Choose the best option for the current VF,
7147       // write down this decision and use it during vectorization.
7148       InstructionCost Cost;
7149       InstWidening Decision;
7150       if (InterleaveCost <= GatherScatterCost &&
7151           InterleaveCost < ScalarizationCost) {
7152         Decision = CM_Interleave;
7153         Cost = InterleaveCost;
7154       } else if (GatherScatterCost < ScalarizationCost) {
7155         Decision = CM_GatherScatter;
7156         Cost = GatherScatterCost;
7157       } else {
7158         assert(!VF.isScalable() &&
7159                "We cannot yet scalarise for scalable vectors");
7160         Decision = CM_Scalarize;
7161         Cost = ScalarizationCost;
7162       }
      // If the instruction belongs to an interleave group, the whole group
7164       // receives the same decision. The whole group receives the cost, but
7165       // the cost will actually be assigned to one instruction.
7166       if (auto Group = getInterleavedAccessGroup(&I))
7167         setWideningDecision(Group, VF, Decision, Cost);
7168       else
7169         setWideningDecision(&I, VF, Decision, Cost);
7170     }
7171   }
7172 
7173   // Make sure that any load of address and any other address computation
7174   // remains scalar unless there is gather/scatter support. This avoids
7175   // inevitable extracts into address registers, and also has the benefit of
7176   // activating LSR more, since that pass can't optimize vectorized
7177   // addresses.
7178   if (TTI.prefersVectorizedAddressing())
7179     return;
7180 
7181   // Start with all scalar pointer uses.
7182   SmallPtrSet<Instruction *, 8> AddrDefs;
7183   for (BasicBlock *BB : TheLoop->blocks())
7184     for (Instruction &I : *BB) {
7185       Instruction *PtrDef =
7186         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7187       if (PtrDef && TheLoop->contains(PtrDef) &&
7188           getWideningDecision(&I, VF) != CM_GatherScatter)
7189         AddrDefs.insert(PtrDef);
7190     }
7191 
7192   // Add all instructions used to generate the addresses.
7193   SmallVector<Instruction *, 4> Worklist;
7194   append_range(Worklist, AddrDefs);
7195   while (!Worklist.empty()) {
7196     Instruction *I = Worklist.pop_back_val();
7197     for (auto &Op : I->operands())
7198       if (auto *InstOp = dyn_cast<Instruction>(Op))
7199         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7200             AddrDefs.insert(InstOp).second)
7201           Worklist.push_back(InstOp);
7202   }
7203 
7204   for (auto *I : AddrDefs) {
7205     if (isa<LoadInst>(I)) {
7206       // Setting the desired widening decision should ideally be handled by
7207       // the cost functions, but since this involves the task of finding out
7208       // if the loaded register is involved in an address computation, it is
7209       // instead changed here when we know this is the case.
7210       InstWidening Decision = getWideningDecision(I, VF);
7211       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7212         // Scalarize a widened load of address.
7213         setWideningDecision(
7214             I, VF, CM_Scalarize,
7215             (VF.getKnownMinValue() *
7216              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7217       else if (auto Group = getInterleavedAccessGroup(I)) {
7218         // Scalarize an interleave group of address loads.
7219         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7220           if (Instruction *Member = Group->getMember(I))
7221             setWideningDecision(
7222                 Member, VF, CM_Scalarize,
7223                 (VF.getKnownMinValue() *
7224                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7225         }
7226       }
7227     } else
7228       // Make sure I gets scalarized and a cost estimate without
7229       // scalarization overhead.
7230       ForcedScalars[VF].insert(I);
7231   }
7232 }
7233 
7234 InstructionCost
7235 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7236                                                Type *&VectorTy) {
7237   Type *RetTy = I->getType();
7238   if (canTruncateToMinimalBitwidth(I, VF))
7239     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
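  // Cost the instruction on its scalar type if it remains scalar after
  // vectorization, and on the corresponding vector type otherwise.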
7240   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7241   auto SE = PSE.getSE();
7242   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7243 
7244   // TODO: We need to estimate the cost of intrinsic calls.
7245   switch (I->getOpcode()) {
7246   case Instruction::GetElementPtr:
7247     // We mark this instruction as zero-cost because the cost of GEPs in
7248     // vectorized code depends on whether the corresponding memory instruction
7249     // is scalarized or not. Therefore, we handle GEPs with the memory
7250     // instruction cost.
7251     return 0;
7252   case Instruction::Br: {
7253     // In cases of scalarized and predicated instructions, there will be VF
7254     // predicated blocks in the vectorized loop. Each branch around these
7255     // blocks also requires an extract of its vector compare i1 element.
7256     bool ScalarPredicatedBB = false;
7257     BranchInst *BI = cast<BranchInst>(I);
7258     if (VF.isVector() && BI->isConditional() &&
7259         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7260          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7261       ScalarPredicatedBB = true;
7262 
7263     if (ScalarPredicatedBB) {
7264       // Return cost for branches around scalarized and predicated blocks.
7265       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7266       auto *Vec_i1Ty =
7267           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7268       return (TTI.getScalarizationOverhead(
7269                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7270                   false, true) +
7271               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7272                VF.getKnownMinValue()));
7273     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7274       // The back-edge branch will remain, as will all scalar branches.
7275       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7276     else
7277       // This branch will be eliminated by if-conversion.
7278       return 0;
7279     // Note: We currently assume zero cost for an unconditional branch inside
7280     // a predicated block since it will become a fall-through, although we
7281     // may decide in the future to call TTI for all branches.
7282   }
7283   case Instruction::PHI: {
7284     auto *Phi = cast<PHINode>(I);
7285 
7286     // First-order recurrences are replaced by vector shuffles inside the loop.
7287     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7288     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7289       return TTI.getShuffleCost(
7290           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7291           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7292 
7293     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7294     // converted into select instructions. We require N - 1 selects per phi
7295     // node, where N is the number of incoming values.
7296     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7297       return (Phi->getNumIncomingValues() - 1) *
7298              TTI.getCmpSelInstrCost(
7299                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7300                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7301                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7302 
7303     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7304   }
7305   case Instruction::UDiv:
7306   case Instruction::SDiv:
7307   case Instruction::URem:
7308   case Instruction::SRem:
7309     // If we have a predicated instruction, it may not be executed for each
7310     // vector lane. Get the scalarization cost and scale this amount by the
7311     // probability of executing the predicated block. If the instruction is not
7312     // predicated, we fall through to the next case.
7313     if (VF.isVector() && isScalarWithPredication(I)) {
7314       InstructionCost Cost = 0;
7315 
7316       // These instructions have a non-void type, so account for the phi nodes
7317       // that we will create. This cost is likely to be zero. The phi node
7318       // cost, if any, should be scaled by the block probability because it
7319       // models a copy at the end of each predicated block.
7320       Cost += VF.getKnownMinValue() *
7321               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7322 
7323       // The cost of the non-predicated instruction.
7324       Cost += VF.getKnownMinValue() *
7325               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7326 
7327       // The cost of insertelement and extractelement instructions needed for
7328       // scalarization.
7329       Cost += getScalarizationOverhead(I, VF);
7330 
7331       // Scale the cost by the probability of executing the predicated blocks.
7332       // This assumes the predicated block for each vector lane is equally
7333       // likely.
7334       return Cost / getReciprocalPredBlockProb();
7335     }
7336     LLVM_FALLTHROUGH;
7337   case Instruction::Add:
7338   case Instruction::FAdd:
7339   case Instruction::Sub:
7340   case Instruction::FSub:
7341   case Instruction::Mul:
7342   case Instruction::FMul:
7343   case Instruction::FDiv:
7344   case Instruction::FRem:
7345   case Instruction::Shl:
7346   case Instruction::LShr:
7347   case Instruction::AShr:
7348   case Instruction::And:
7349   case Instruction::Or:
7350   case Instruction::Xor: {
7351     // Since we will replace the stride by 1 the multiplication should go away.
7352     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7353       return 0;
7354 
7355     // Detect reduction patterns
7356     InstructionCost RedCost;
7357     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7358             .isValid())
7359       return RedCost;
7360 
7361     // Certain instructions can be cheaper to vectorize if they have a constant
7362     // second vector operand. One example of this are shifts on x86.
7363     Value *Op2 = I->getOperand(1);
7364     TargetTransformInfo::OperandValueProperties Op2VP;
7365     TargetTransformInfo::OperandValueKind Op2VK =
7366         TTI.getOperandInfo(Op2, Op2VP);
7367     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7368       Op2VK = TargetTransformInfo::OK_UniformValue;
7369 
7370     SmallVector<const Value *, 4> Operands(I->operand_values());
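    // If the instruction remains scalar after vectorization it is replicated,
    // so pay the scalar cost VF times; otherwise cost a single vector
    // instruction on VectorTy.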
7371     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7372     return N * TTI.getArithmeticInstrCost(
7373                    I->getOpcode(), VectorTy, CostKind,
7374                    TargetTransformInfo::OK_AnyValue,
7375                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7376   }
7377   case Instruction::FNeg: {
7378     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7379     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7380     return N * TTI.getArithmeticInstrCost(
7381                    I->getOpcode(), VectorTy, CostKind,
7382                    TargetTransformInfo::OK_AnyValue,
7383                    TargetTransformInfo::OK_AnyValue,
7384                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7385                    I->getOperand(0), I);
7386   }
7387   case Instruction::Select: {
7388     SelectInst *SI = cast<SelectInst>(I);
7389     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7390     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7391     Type *CondTy = SI->getCondition()->getType();
7392     if (!ScalarCond)
7393       CondTy = VectorType::get(CondTy, VF);
7394     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7395                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7396   }
7397   case Instruction::ICmp:
7398   case Instruction::FCmp: {
7399     Type *ValTy = I->getOperand(0)->getType();
7400     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7401     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7402       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7403     VectorTy = ToVectorTy(ValTy, VF);
7404     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7405                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7406   }
7407   case Instruction::Store:
7408   case Instruction::Load: {
7409     ElementCount Width = VF;
7410     if (Width.isVector()) {
7411       InstWidening Decision = getWideningDecision(I, Width);
7412       assert(Decision != CM_Unknown &&
7413              "CM decision should be taken at this point");
7414       if (Decision == CM_Scalarize)
7415         Width = ElementCount::getFixed(1);
7416     }
7417     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7418     return getMemoryInstructionCost(I, VF);
7419   }
7420   case Instruction::ZExt:
7421   case Instruction::SExt:
7422   case Instruction::FPToUI:
7423   case Instruction::FPToSI:
7424   case Instruction::FPExt:
7425   case Instruction::PtrToInt:
7426   case Instruction::IntToPtr:
7427   case Instruction::SIToFP:
7428   case Instruction::UIToFP:
7429   case Instruction::Trunc:
7430   case Instruction::FPTrunc:
7431   case Instruction::BitCast: {
7432     // Computes the CastContextHint from a Load/Store instruction.
7433     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7434       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7435              "Expected a load or a store!");
7436 
7437       if (VF.isScalar() || !TheLoop->contains(I))
7438         return TTI::CastContextHint::Normal;
7439 
7440       switch (getWideningDecision(I, VF)) {
7441       case LoopVectorizationCostModel::CM_GatherScatter:
7442         return TTI::CastContextHint::GatherScatter;
7443       case LoopVectorizationCostModel::CM_Interleave:
7444         return TTI::CastContextHint::Interleave;
7445       case LoopVectorizationCostModel::CM_Scalarize:
7446       case LoopVectorizationCostModel::CM_Widen:
7447         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7448                                         : TTI::CastContextHint::Normal;
7449       case LoopVectorizationCostModel::CM_Widen_Reverse:
7450         return TTI::CastContextHint::Reversed;
7451       case LoopVectorizationCostModel::CM_Unknown:
7452         llvm_unreachable("Instr did not go through cost modelling?");
7453       }
7454 
7455       llvm_unreachable("Unhandled case!");
7456     };
7457 
7458     unsigned Opcode = I->getOpcode();
7459     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7460     // For Trunc, the context is the only user, which must be a StoreInst.
7461     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7462       if (I->hasOneUse())
7463         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7464           CCH = ComputeCCH(Store);
7465     }
7466     // For Z/Sext, the context is the operand, which must be a LoadInst.
7467     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7468              Opcode == Instruction::FPExt) {
7469       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7470         CCH = ComputeCCH(Load);
7471     }
7472 
7473     // We optimize the truncation of induction variables having constant
7474     // integer steps. The cost of these truncations is the same as the scalar
7475     // operation.
7476     if (isOptimizableIVTruncate(I, VF)) {
7477       auto *Trunc = cast<TruncInst>(I);
7478       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7479                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7480     }
7481 
7482     // Detect reduction patterns
7483     InstructionCost RedCost;
7484     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7485             .isValid())
7486       return RedCost;
7487 
7488     Type *SrcScalarTy = I->getOperand(0)->getType();
7489     Type *SrcVecTy =
7490         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7491     if (canTruncateToMinimalBitwidth(I, VF)) {
7492       // This cast is going to be shrunk. This may remove the cast or it might
7493       // turn it into a slightly different cast. For example, if MinBW == 16,
7494       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7495       //
7496       // Calculate the modified src and dest types.
7497       Type *MinVecTy = VectorTy;
7498       if (Opcode == Instruction::Trunc) {
7499         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7500         VectorTy =
7501             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7502       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7503         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7504         VectorTy =
7505             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7506       }
7507     }
7508 
7509     unsigned N;
7510     if (isScalarAfterVectorization(I, VF)) {
7511       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7512       N = VF.getKnownMinValue();
7513     } else
7514       N = 1;
7515     return N *
7516            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7517   }
7518   case Instruction::Call: {
7519     bool NeedToScalarize;
7520     CallInst *CI = cast<CallInst>(I);
7521     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7522     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7523       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7524       return std::min(CallCost, IntrinsicCost);
7525     }
7526     return CallCost;
7527   }
7528   case Instruction::ExtractValue:
7529     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7530   default:
7531     // The cost of executing VF copies of the scalar instruction. This opcode
7532     // is unknown. Assume that it is the same as 'mul'.
7533     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7534                                        Instruction::Mul, VectorTy, CostKind) +
7535            getScalarizationOverhead(I, VF);
7536   } // end of switch.
7537 }
7538 
7539 char LoopVectorize::ID = 0;
7540 
7541 static const char lv_name[] = "Loop Vectorization";
7542 
7543 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7544 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7545 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7546 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7547 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7548 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7549 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7550 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7551 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7552 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7553 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7554 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7555 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7556 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7557 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7558 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7559 
7560 namespace llvm {
7561 
7562 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7563 
7564 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7565                               bool VectorizeOnlyWhenForced) {
7566   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7567 }
7568 
7569 } // end namespace llvm
7570 
7571 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7572   // Check if the pointer operand of a load or store instruction is
7573   // consecutive.
7574   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7575     return Legal->isConsecutivePtr(Ptr);
7576   return false;
7577 }
7578 
7579 void LoopVectorizationCostModel::collectValuesToIgnore() {
7580   // Ignore ephemeral values.
7581   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7582 
7583   // Ignore type-promoting instructions we identified during reduction
7584   // detection.
7585   for (auto &Reduction : Legal->getReductionVars()) {
7586     RecurrenceDescriptor &RedDes = Reduction.second;
7587     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7588     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7589   }
7590   // Ignore type-casting instructions we identified during induction
7591   // detection.
7592   for (auto &Induction : Legal->getInductionVars()) {
7593     InductionDescriptor &IndDes = Induction.second;
7594     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7595     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7596   }
7597 }
7598 
7599 void LoopVectorizationCostModel::collectInLoopReductions() {
7600   for (auto &Reduction : Legal->getReductionVars()) {
7601     PHINode *Phi = Reduction.first;
7602     RecurrenceDescriptor &RdxDesc = Reduction.second;
7603 
7604     // We don't collect reductions that are type promoted (yet).
7605     if (RdxDesc.getRecurrenceType() != Phi->getType())
7606       continue;
7607 
7608     // If the target would prefer this reduction to happen "in-loop", then we
7609     // want to record it as such.
7610     unsigned Opcode = RdxDesc.getOpcode();
7611     if (!PreferInLoopReductions &&
7612         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7613                                    TargetTransformInfo::ReductionFlags()))
7614       continue;
7615 
7616     // Check that we can correctly put the reductions into the loop, by
7617     // finding the chain of operations that leads from the phi to the loop
7618     // exit value.
7619     SmallVector<Instruction *, 4> ReductionOperations =
7620         RdxDesc.getReductionOpChain(Phi, TheLoop);
7621     bool InLoop = !ReductionOperations.empty();
7622     if (InLoop) {
7623       InLoopReductionChains[Phi] = ReductionOperations;
7624       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7625       Instruction *LastChain = Phi;
7626       for (auto *I : ReductionOperations) {
7627         InLoopReductionImmediateChains[I] = LastChain;
7628         LastChain = I;
7629       }
7630     }
7631     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7632                       << " reduction for phi: " << *Phi << "\n");
7633   }
7634 }
7635 
7636 // TODO: we could return a pair of values that specify the max VF and
7637 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7638 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7639 // doesn't have a cost model that can choose which plan to execute if
7640 // more than one is generated.
7641 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7642                                  LoopVectorizationCostModel &CM) {
7643   unsigned WidestType;
7644   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
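  // Choose the largest VF for which the widest scalar type in the loop still
  // fits in a single vector register.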
7645   return WidestVectorRegBits / WidestType;
7646 }
7647 
7648 VectorizationFactor
7649 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7650   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7651   ElementCount VF = UserVF;
7652   // Outer loop handling: Outer loops may require CFG and instruction level
7653   // transformations before even evaluating whether vectorization is profitable.
7654   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7655   // the vectorization pipeline.
7656   if (!OrigLoop->isInnermost()) {
7657     // If the user doesn't provide a vectorization factor, determine a
7658     // reasonable one.
7659     if (UserVF.isZero()) {
7660       VF = ElementCount::getFixed(
7661           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
7662       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7663 
7664       // Make sure we have a VF > 1 for stress testing.
7665       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7666         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7667                           << "overriding computed VF.\n");
7668         VF = ElementCount::getFixed(4);
7669       }
7670     }
7671     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7672     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7673            "VF needs to be a power of two");
7674     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7675                       << "VF " << VF << " to build VPlans.\n");
7676     buildVPlans(VF, VF);
7677 
7678     // For VPlan build stress testing, we bail out after VPlan construction.
7679     if (VPlanBuildStressTest)
7680       return VectorizationFactor::Disabled();
7681 
7682     return {VF, 0 /*Cost*/};
7683   }
7684 
7685   LLVM_DEBUG(
7686       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7687                 "VPlan-native path.\n");
7688   return VectorizationFactor::Disabled();
7689 }
7690 
7691 Optional<VectorizationFactor>
7692 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7693   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7694   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
7695   if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
7696     return None;
7697 
7698   // Invalidate interleave groups if all blocks of loop will be predicated.
7699   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7700       !useMaskedInterleavedAccesses(*TTI)) {
7701     LLVM_DEBUG(
7702         dbgs()
7703         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7704            "which requires masked-interleaved support.\n");
7705     if (CM.InterleaveInfo.invalidateGroups())
7706       // Invalidating interleave groups also requires invalidating all decisions
7707       // based on them, which includes widening decisions and uniform and scalar
7708       // values.
7709       CM.invalidateCostModelingDecisions();
7710   }
7711 
7712   ElementCount MaxVF = MaybeMaxVF.getValue();
7713   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7714 
7715   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7716   if (!UserVF.isZero() &&
7717       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
7718     // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
7719     // VFs here; this should be reverted to only use legal UserVFs once the
7720     // loop below supports scalable VFs.
7721     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7722     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7723                       << " VF " << VF << ".\n");
7724     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7725            "VF needs to be a power of two");
7726     // Collect the instructions (and their associated costs) that will be more
7727     // profitable to scalarize.
7728     CM.selectUserVectorizationFactor(VF);
7729     CM.collectInLoopReductions();
7730     buildVPlansWithVPRecipes(VF, VF);
7731     LLVM_DEBUG(printPlans(dbgs()));
7732     return {{VF, 0}};
7733   }
7734 
7735   assert(!MaxVF.isScalable() &&
7736          "Scalable vectors not yet supported beyond this point");
7737 
7738   for (ElementCount VF = ElementCount::getFixed(1);
7739        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7740     // Collect Uniform and Scalar instructions after vectorization with VF.
7741     CM.collectUniformsAndScalars(VF);
7742 
7743     // Collect the instructions (and their associated costs) that will be more
7744     // profitable to scalarize.
7745     if (VF.isVector())
7746       CM.collectInstsToScalarize(VF);
7747   }
7748 
7749   CM.collectInLoopReductions();
7750 
7751   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7752   LLVM_DEBUG(printPlans(dbgs()));
7753   if (MaxVF.isScalar())
7754     return VectorizationFactor::Disabled();
7755 
7756   // Select the optimal vectorization factor.
7757   return CM.selectVectorizationFactor(MaxVF);
7758 }
7759 
7760 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7761   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7762                     << '\n');
7763   BestVF = VF;
7764   BestUF = UF;
7765 
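  // Discard all candidate plans that cannot handle the chosen VF; exactly one
  // plan must remain.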
7766   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7767     return !Plan->hasVF(VF);
7768   });
7769   assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7770 }
7771 
7772 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7773                                            DominatorTree *DT) {
7774   // Perform the actual loop transformation.
7775 
7776   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7777   VPCallbackILV CallbackILV(ILV);
7778 
7779   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7780 
7781   VPTransformState State{*BestVF,
7782                          BestUF,
7783                          LI,
7784                          DT,
7785                          ILV.Builder,
7786                          ILV.VectorLoopValueMap,
7787                          &ILV,
7788                          CallbackILV};
7789   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7790   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7791   State.CanonicalIV = ILV.Induction;
7792 
7793   ILV.printDebugTracesAtStart();
7794 
7795   //===------------------------------------------------===//
7796   //
7797   // Notice: any optimization or new instruction that goes
7798   // into the code below should also be implemented in
7799   // the cost-model.
7800   //
7801   //===------------------------------------------------===//
7802 
7803   // 2. Copy and widen instructions from the old loop into the new loop.
7804   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7805   VPlans.front()->execute(&State);
7806 
7807   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7808   //    predication, updating analyses.
7809   ILV.fixVectorizedLoop();
7810 
7811   ILV.printDebugTracesAtEnd();
7812 }
7813 
7814 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7815     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7816 
7817   // We create new control-flow for the vectorized loop, so the original exit
7818   // condition will be dead after vectorization if it is only used by the
7819   // terminator.
7820   SmallVector<BasicBlock*> ExitingBlocks;
7821   OrigLoop->getExitingBlocks(ExitingBlocks);
7822   for (auto *BB : ExitingBlocks) {
7823     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7824     if (!Cmp || !Cmp->hasOneUse())
7825       continue;
7826 
7827     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7828     if (!DeadInstructions.insert(Cmp).second)
7829       continue;
7830 
7831     // An operand of the icmp is often a dead trunc, used by IndUpdate.
7832     // TODO: can recurse through operands in general
7833     for (Value *Op : Cmp->operands()) {
7834       if (isa<TruncInst>(Op) && Op->hasOneUse())
7835           DeadInstructions.insert(cast<Instruction>(Op));
7836     }
7837   }
7838 
7839   // We create new "steps" for induction variable updates to which the original
7840   // induction variables map. An original update instruction will be dead if
7841   // all its users except the induction variable are dead.
7842   auto *Latch = OrigLoop->getLoopLatch();
7843   for (auto &Induction : Legal->getInductionVars()) {
7844     PHINode *Ind = Induction.first;
7845     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7846 
7847     // If the tail is to be folded by masking, the primary induction variable,
7848     // if it exists, isn't dead: it will be used for masking. Don't kill it.
7849     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7850       continue;
7851 
7852     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7853           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7854         }))
7855       DeadInstructions.insert(IndUpdate);
7856 
7857     // We also record as "Dead" the type-casting instructions we had identified
7858     // during induction analysis. We don't need any handling for them in the
7859     // vectorized loop because we have proven that, under a proper runtime
7860     // test guarding the vectorized loop, the value of the phi, and the casted
7861     // value of the phi, are the same. The last instruction in this casting chain
7862     // will get its scalar/vector/widened def from the scalar/vector/widened def
7863     // of the respective phi node. Any other casts in the induction def-use chain
7864     // have no other uses outside the phi update chain, and will be ignored.
7865     InductionDescriptor &IndDes = Induction.second;
7866     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7867     DeadInstructions.insert(Casts.begin(), Casts.end());
7868   }
7869 }
7870 
7871 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7872 
7873 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7874 
7875 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7876                                         Instruction::BinaryOps BinOp) {
7877   // When unrolling and the VF is 1, we only need to add a simple scalar.
7878   Type *Ty = Val->getType();
7879   assert(!Ty->isVectorTy() && "Val must be a scalar");
7880 
7881   if (Ty->isFloatingPointTy()) {
7882     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7883 
7884     // Floating point operations had to be 'fast' to enable the unrolling.
7885     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7886     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7887   }
7888   Constant *C = ConstantInt::get(Ty, StartIdx);
7889   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7890 }
7891 
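/// Add "llvm.loop.unroll.runtime.disable" metadata to \p L, preserving its
/// existing loop metadata, unless unroll-disable metadata is already present.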
7892 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7893   SmallVector<Metadata *, 4> MDs;
7894   // Reserve first location for self reference to the LoopID metadata node.
7895   MDs.push_back(nullptr);
7896   bool IsUnrollMetadata = false;
7897   MDNode *LoopID = L->getLoopID();
7898   if (LoopID) {
7899     // First find existing loop unrolling disable metadata.
7900     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7901       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7902       if (MD) {
7903         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7904         IsUnrollMetadata |=
7905             S && S->getString().startswith("llvm.loop.unroll.disable");
7906       }
7907       MDs.push_back(LoopID->getOperand(i));
7908     }
7909   }
7910 
7911   if (!IsUnrollMetadata) {
7912     // Add runtime unroll disable metadata.
7913     LLVMContext &Context = L->getHeader()->getContext();
7914     SmallVector<Metadata *, 1> DisableOperands;
7915     DisableOperands.push_back(
7916         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7917     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7918     MDs.push_back(DisableNode);
7919     MDNode *NewLoopID = MDNode::get(Context, MDs);
7920     // Set operand 0 to refer to the loop id itself.
7921     NewLoopID->replaceOperandWith(0, NewLoopID);
7922     L->setLoopID(NewLoopID);
7923   }
7924 }
7925 
7926 //===--------------------------------------------------------------------===//
7927 // EpilogueVectorizerMainLoop
7928 //===--------------------------------------------------------------------===//
7929 
7930 /// This function is partially responsible for generating the control flow
7931 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7932 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7933   MDNode *OrigLoopID = OrigLoop->getLoopID();
7934   Loop *Lp = createVectorLoopSkeleton("");
7935 
7936   // Generate the code to check the minimum iteration count of the vector
7937   // epilogue (see below).
7938   EPI.EpilogueIterationCountCheck =
7939       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7940   EPI.EpilogueIterationCountCheck->setName("iter.check");
7941 
7942   // Generate the code to check any assumptions that we've made for SCEV
7943   // expressions.
7944   BasicBlock *SavedPreHeader = LoopVectorPreHeader;
7945   emitSCEVChecks(Lp, LoopScalarPreHeader);
7946 
7947   // If a safety check was generated, save it.
7948   if (SavedPreHeader != LoopVectorPreHeader)
7949     EPI.SCEVSafetyCheck = SavedPreHeader;
7950 
7951   // Generate the code that checks at runtime if arrays overlap. We put the
7952   // checks into a separate block to make the more common case of few elements
7953   // faster.
7954   SavedPreHeader = LoopVectorPreHeader;
7955   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7956 
7957   // If a safety check was generated, save/overwrite it.
7958   if (SavedPreHeader != LoopVectorPreHeader)
7959     EPI.MemSafetyCheck = SavedPreHeader;
7960 
7961   // Generate the iteration count check for the main loop, *after* the check
7962   // for the epilogue loop, so that the path-length is shorter for the case
7963   // that goes directly through the vector epilogue. The longer-path length for
7964   // the main loop is compensated for by the gain from vectorizing the larger
7965   // trip count. Note: the branch will get updated later on when we vectorize
7966   // the epilogue.
7967   EPI.MainLoopIterationCountCheck =
7968       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7969 
7970   // Generate the induction variable.
7971   OldInduction = Legal->getPrimaryInduction();
7972   Type *IdxTy = Legal->getWidestInductionType();
7973   Value *StartIdx = ConstantInt::get(IdxTy, 0);
7974   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7975   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7976   EPI.VectorTripCount = CountRoundDown;
7977   Induction =
7978       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7979                               getDebugLocFromInstOrOperands(OldInduction));
7980 
7981   // Skip creating induction resume values here because they will be created in
7982   // the second pass. If we created them here, they wouldn't be used anyway,
7983   // because the vplan in the second pass still contains the inductions from the
7984   // original loop.
7985 
7986   return completeLoopSkeleton(Lp, OrigLoopID);
7987 }
7988 
7989 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7990   LLVM_DEBUG({
7991     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7992            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
7993            << ", Main Loop UF:" << EPI.MainLoopUF
7994            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
7995            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7996   });
7997 }
7998 
7999 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8000   DEBUG_WITH_TYPE(VerboseDebug, {
8001     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8002   });
8003 }
8004 
8005 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8006     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8007   assert(L && "Expected valid Loop.");
8008   assert(Bypass && "Expected valid bypass basic block.");
8009   unsigned VFactor =
8010       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8011   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8012   Value *Count = getOrCreateTripCount(L);
8013   // Reuse existing vector loop preheader for TC checks.
8014   // Note that new preheader block is generated for vector loop.
8015   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8016   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8017 
8018   // Generate code to check if the loop's trip count is less than VF * UF of the
8019   // main vector loop.
8020   auto P =
8021       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8022 
8023   Value *CheckMinIters = Builder.CreateICmp(
8024       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8025       "min.iters.check");
8026 
8027   if (!ForEpilogue)
8028     TCCheckBlock->setName("vector.main.loop.iter.check");
8029 
8030   // Create new preheader for vector loop.
8031   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8032                                    DT, LI, nullptr, "vector.ph");
8033 
8034   if (ForEpilogue) {
8035     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8036                                  DT->getNode(Bypass)->getIDom()) &&
8037            "TC check is expected to dominate Bypass");
8038 
8039     // Update dominator for Bypass & LoopExit.
8040     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8041     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8042 
8043     LoopBypassBlocks.push_back(TCCheckBlock);
8044 
8045     // Save the trip count so we don't have to regenerate it in the
8046     // vec.epilog.iter.check. This is safe to do because the trip count
8047     // generated here dominates the vector epilog iter check.
8048     EPI.TripCount = Count;
8049   }
8050 
8051   ReplaceInstWithInst(
8052       TCCheckBlock->getTerminator(),
8053       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8054 
8055   return TCCheckBlock;
8056 }
8057 
8058 //===--------------------------------------------------------------------===//
8059 // EpilogueVectorizerEpilogueLoop
8060 //===--------------------------------------------------------------------===//
8061 
8062 /// This function is partially responsible for generating the control flow
8063 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8064 BasicBlock *
8065 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8066   MDNode *OrigLoopID = OrigLoop->getLoopID();
8067   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8068 
8069   // Now, compare the remaining count and if there aren't enough iterations to
8070   // execute the vectorized epilogue, skip to the scalar part.
8071   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8072   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8073   LoopVectorPreHeader =
8074       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8075                  LI, nullptr, "vec.epilog.ph");
8076   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8077                                           VecEpilogueIterationCountCheck);
8078 
8079   // Adjust the control flow taking the state info from the main loop
8080   // vectorization into account.
8081   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8082          "expected this to be saved from the previous pass.");
8083   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8084       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8085 
8086   DT->changeImmediateDominator(LoopVectorPreHeader,
8087                                EPI.MainLoopIterationCountCheck);
8088 
8089   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8090       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8091 
8092   if (EPI.SCEVSafetyCheck)
8093     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8094         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8095   if (EPI.MemSafetyCheck)
8096     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8097         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8098 
8099   DT->changeImmediateDominator(
8100       VecEpilogueIterationCountCheck,
8101       VecEpilogueIterationCountCheck->getSinglePredecessor());
8102 
8103   DT->changeImmediateDominator(LoopScalarPreHeader,
8104                                EPI.EpilogueIterationCountCheck);
8105   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8106 
8107   // Keep track of bypass blocks, as they feed start values to the induction
8108   // phis in the scalar loop preheader.
8109   if (EPI.SCEVSafetyCheck)
8110     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8111   if (EPI.MemSafetyCheck)
8112     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8113   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8114 
8115   // Generate a resume induction for the vector epilogue and put it in the
8116   // vector epilogue preheader
8117   Type *IdxTy = Legal->getWidestInductionType();
8118   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8119                                          LoopVectorPreHeader->getFirstNonPHI());
8120   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8121   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8122                            EPI.MainLoopIterationCountCheck);
8123 
8124   // Generate the induction variable.
8125   OldInduction = Legal->getPrimaryInduction();
8126   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8127   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8128   Value *StartIdx = EPResumeVal;
8129   Induction =
8130       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8131                               getDebugLocFromInstOrOperands(OldInduction));
8132 
8133   // Generate induction resume values. These variables save the new starting
8134   // indexes for the scalar loop. They are used to test if there are any tail
8135   // iterations left once the vector loop has completed.
8136   // Note that when the vectorized epilogue is skipped due to iteration count
8137   // check, then the resume value for the induction variable comes from
8138   // the trip count of the main vector loop, hence passing the AdditionalBypass
8139   // argument.
8140   createInductionResumeValues(Lp, CountRoundDown,
8141                               {VecEpilogueIterationCountCheck,
8142                                EPI.VectorTripCount} /* AdditionalBypass */);
8143 
8144   AddRuntimeUnrollDisableMetaData(Lp);
8145   return completeLoopSkeleton(Lp, OrigLoopID);
8146 }
8147 
8148 BasicBlock *
8149 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8150     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8151 
8152   assert(EPI.TripCount &&
8153          "Expected trip count to have been saved in the first pass.");
8154   assert(
8155       (!isa<Instruction>(EPI.TripCount) ||
8156        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8157       "saved trip count does not dominate insertion point.");
8158   Value *TC = EPI.TripCount;
8159   IRBuilder<> Builder(Insert->getTerminator());
8160   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8161 
8162   // Generate code to check if the loop's trip count is less than VF * UF of the
8163   // vector epilogue loop.
8164   auto P =
8165       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8166 
8167   Value *CheckMinIters = Builder.CreateICmp(
8168       P, Count,
8169       ConstantInt::get(Count->getType(),
8170                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8171       "min.epilog.iters.check");
8172 
8173   ReplaceInstWithInst(
8174       Insert->getTerminator(),
8175       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8176 
8177   LoopBypassBlocks.push_back(Insert);
8178   return Insert;
8179 }
8180 
8181 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8182   LLVM_DEBUG({
8183     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8184            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8185            << ", Main Loop UF:" << EPI.MainLoopUF
8186            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8187            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8188   });
8189 }
8190 
8191 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8192   DEBUG_WITH_TYPE(VerboseDebug, {
8193     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8194   });
8195 }
8196 
8197 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8198     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8199   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8200   bool PredicateAtRangeStart = Predicate(Range.Start);
8201 
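  // Walk the powers-of-two in the range; the first VF whose decision differs
  // from the one at Range.Start becomes the new exclusive end, so every VF
  // left in the range shares the same decision.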
8202   for (ElementCount TmpVF = Range.Start * 2;
8203        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8204     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8205       Range.End = TmpVF;
8206       break;
8207     }
8208 
8209   return PredicateAtRangeStart;
8210 }
8211 
8212 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8213 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8214 /// of VF's starting at a given VF and extending it as much as possible. Each
8215 /// vectorization decision can potentially shorten this sub-range during
8216 /// buildVPlan().
8217 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8218                                            ElementCount MaxVF) {
8219   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8220   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8221     VFRange SubRange = {VF, MaxVFPlusOne};
8222     VPlans.push_back(buildVPlan(SubRange));
8223     VF = SubRange.End;
8224   }
8225 }
8226 
8227 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8228                                          VPlanPtr &Plan) {
8229   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8230 
8231   // Look for cached value.
8232   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8233   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8234   if (ECEntryIt != EdgeMaskCache.end())
8235     return ECEntryIt->second;
8236 
8237   VPValue *SrcMask = createBlockInMask(Src, Plan);
8238 
8239   // The terminator has to be a branch inst!
8240   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8241   assert(BI && "Unexpected terminator found");
8242 
8243   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8244     return EdgeMaskCache[Edge] = SrcMask;
8245 
8246   // If source is an exiting block, we know the exit edge is dynamically dead
8247   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8248   // adding uses of an otherwise potentially dead instruction.
8249   if (OrigLoop->isLoopExiting(Src))
8250     return EdgeMaskCache[Edge] = SrcMask;
8251 
8252   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8253   assert(EdgeMask && "No Edge Mask found for condition");
8254 
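  // The branch condition guards the edge to successor 0; for the edge into
  // the other successor the mask is the negation of the condition.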
8255   if (BI->getSuccessor(0) != Dst)
8256     EdgeMask = Builder.createNot(EdgeMask);
8257 
8258   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
8259     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
8260 
8261   return EdgeMaskCache[Edge] = EdgeMask;
8262 }
8263 
8264 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8265   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8266 
8267   // Look for cached value.
8268   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8269   if (BCEntryIt != BlockMaskCache.end())
8270     return BCEntryIt->second;
8271 
8272   // All-one mask is modelled as no-mask following the convention for masked
8273   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8274   VPValue *BlockMask = nullptr;
8275 
8276   if (OrigLoop->getHeader() == BB) {
8277     if (!CM.blockNeedsPredication(BB))
8278       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8279 
8280     // Create the block in mask as the first non-phi instruction in the block.
8281     VPBuilder::InsertPointGuard Guard(Builder);
8282     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8283     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8284 
8285     // Introduce the early-exit compare IV <= BTC to form header block mask.
8286     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8287     // Start by constructing the desired canonical IV.
8288     VPValue *IV = nullptr;
8289     if (Legal->getPrimaryInduction())
8290       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8291     else {
8292       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8293       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8294       IV = IVRecipe->getVPValue();
8295     }
8296     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8297     bool TailFolded = !CM.isScalarEpilogueAllowed();
8298 
8299     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8300       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8301       // as a second argument, we only pass the IV here and extract the
8302       // tripcount from the transform state where codegen of the VP instructions
8303       // happens.
8304       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8305     } else {
8306       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8307     }
8308     return BlockMaskCache[BB] = BlockMask;
8309   }
8310 
8311   // This is the block mask. We OR all incoming edges.
8312   for (auto *Predecessor : predecessors(BB)) {
8313     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8314     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8315       return BlockMaskCache[BB] = EdgeMask;
8316 
8317     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8318       BlockMask = EdgeMask;
8319       continue;
8320     }
8321 
8322     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8323   }
8324 
8325   return BlockMaskCache[BB] = BlockMask;
8326 }
8327 
8328 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
8329                                                 VPlanPtr &Plan) {
8330   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8331          "Must be called with either a load or store");
8332 
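  // Widen the memory instruction only if the cost model chose a
  // non-scalarizing strategy for it; getDecisionAndClampRange clamps the VF
  // range so all remaining VFs share this decision. Interleaving counts as
  // widening here.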
8333   auto willWiden = [&](ElementCount VF) -> bool {
8334     if (VF.isScalar())
8335       return false;
8336     LoopVectorizationCostModel::InstWidening Decision =
8337         CM.getWideningDecision(I, VF);
8338     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8339            "CM decision should be taken at this point.");
8340     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8341       return true;
8342     if (CM.isScalarAfterVectorization(I, VF) ||
8343         CM.isProfitableToScalarize(I, VF))
8344       return false;
8345     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8346   };
8347 
8348   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8349     return nullptr;
8350 
8351   VPValue *Mask = nullptr;
8352   if (Legal->isMaskRequired(I))
8353     Mask = createBlockInMask(I->getParent(), Plan);
8354 
8355   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
8356   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8357     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
8358 
8359   StoreInst *Store = cast<StoreInst>(I);
8360   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
8361   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
8362 }
8363 
8364 VPWidenIntOrFpInductionRecipe *
8365 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
8366   // Check if this is an integer or fp induction. If so, build the recipe that
8367   // produces its scalar and vector values.
8368   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8369   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8370       II.getKind() == InductionDescriptor::IK_FpInduction) {
8371     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8372     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8373     return new VPWidenIntOrFpInductionRecipe(
8374         Phi, Start, Casts.empty() ? nullptr : Casts.front());
8375   }
8376 
8377   return nullptr;
8378 }
8379 
8380 VPWidenIntOrFpInductionRecipe *
8381 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range,
8382                                                 VPlan &Plan) const {
8383   // Optimize the special case where the source is a constant integer
8384   // induction variable. Notice that we can only optimize the 'trunc' case
8385   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8386   // (c) other casts depend on pointer size.
8387 
8388   // Determine whether \p K is a truncation based on an induction variable that
8389   // can be optimized.
8390   auto isOptimizableIVTruncate =
8391       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8392     return [=](ElementCount VF) -> bool {
8393       return CM.isOptimizableIVTruncate(K, VF);
8394     };
8395   };
8396 
8397   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8398           isOptimizableIVTruncate(I), Range)) {
8399 
8400     InductionDescriptor II =
8401         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8402     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8403     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8404                                              Start, nullptr, I);
8405   }
8406   return nullptr;
8407 }
8408 
8409 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8410   // We know that all PHIs in non-header blocks are converted into selects, so
8411   // we don't have to worry about the insertion order and we can just use the
8412   // builder. At this point we generate the predication tree. There may be
8413   // duplications since this is a simple recursive scan, but future
8414   // optimizations will clean it up.
8415 
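  // Operands are laid out as (incoming value, edge mask) pairs; the edge mask
  // is omitted only in the single-predecessor case, where it is all-true.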
8416   SmallVector<VPValue *, 2> Operands;
8417   unsigned NumIncoming = Phi->getNumIncomingValues();
8418   for (unsigned In = 0; In < NumIncoming; In++) {
8419     VPValue *EdgeMask =
8420       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8421     assert((EdgeMask || NumIncoming == 1) &&
8422            "Multiple predecessors with one having a full mask");
8423     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8424     if (EdgeMask)
8425       Operands.push_back(EdgeMask);
8426   }
8427   return new VPBlendRecipe(Phi, Operands);
8428 }
8429 
8430 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8431                                                    VPlan &Plan) const {
8432 
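  // Calls that the cost model decided to scalarize with predication are not
  // widened here; they are handled via the replication path instead.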
8433   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8434       [this, CI](ElementCount VF) {
8435         return CM.isScalarWithPredication(CI, VF);
8436       },
8437       Range);
8438 
8439   if (IsPredicated)
8440     return nullptr;
8441 
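  // Certain intrinsics (assumes, lifetime markers, etc.) are not widened as
  // calls; leave them to be handled elsewhere.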
8442   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8443   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8444              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8445              ID == Intrinsic::pseudoprobe ||
8446              ID == Intrinsic::experimental_noalias_scope_decl))
8447     return nullptr;
8448 
8449   auto willWiden = [&](ElementCount VF) -> bool {
8450     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The call may be scalarized depending on the VF. NeedToScalarize reports
    // whether a vectorized library call would have to be scalarized, and
    // UseVectorIntrinsic reports whether calling a vector intrinsic is at
    // least as cheap as calling a vector library function.
8455     bool NeedToScalarize = false;
8456     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8457     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8458     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8459     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8460            "Cannot have invalid costs while widening");
8461     return UseVectorIntrinsic || !NeedToScalarize;
8462   };
8463 
8464   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8465     return nullptr;
8466 
8467   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8468 }
8469 
8470 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8471   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8472          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8473   // Instruction should be widened, unless it is scalar after vectorization,
8474   // scalarization is profitable or it is predicated.
8475   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8476     return CM.isScalarAfterVectorization(I, VF) ||
8477            CM.isProfitableToScalarize(I, VF) ||
8478            CM.isScalarWithPredication(I, VF);
8479   };
8480   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8481                                                              Range);
8482 }
8483 
8484 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8485   auto IsVectorizableOpcode = [](unsigned Opcode) {
8486     switch (Opcode) {
8487     case Instruction::Add:
8488     case Instruction::And:
8489     case Instruction::AShr:
8490     case Instruction::BitCast:
8491     case Instruction::FAdd:
8492     case Instruction::FCmp:
8493     case Instruction::FDiv:
8494     case Instruction::FMul:
8495     case Instruction::FNeg:
8496     case Instruction::FPExt:
8497     case Instruction::FPToSI:
8498     case Instruction::FPToUI:
8499     case Instruction::FPTrunc:
8500     case Instruction::FRem:
8501     case Instruction::FSub:
8502     case Instruction::ICmp:
8503     case Instruction::IntToPtr:
8504     case Instruction::LShr:
8505     case Instruction::Mul:
8506     case Instruction::Or:
8507     case Instruction::PtrToInt:
8508     case Instruction::SDiv:
8509     case Instruction::Select:
8510     case Instruction::SExt:
8511     case Instruction::Shl:
8512     case Instruction::SIToFP:
8513     case Instruction::SRem:
8514     case Instruction::Sub:
8515     case Instruction::Trunc:
8516     case Instruction::UDiv:
8517     case Instruction::UIToFP:
8518     case Instruction::URem:
8519     case Instruction::Xor:
8520     case Instruction::ZExt:
8521       return true;
8522     }
8523     return false;
8524   };
8525 
8526   if (!IsVectorizableOpcode(I->getOpcode()))
8527     return nullptr;
8528 
8529   // Success: widen this instruction.
8530   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
8531 }
8532 
8533 VPBasicBlock *VPRecipeBuilder::handleReplication(
8534     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8535     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8536     VPlanPtr &Plan) {
8537   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8538       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8539       Range);
8540 
8541   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8542       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8543       Range);
8544 
8545   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8546                                        IsUniform, IsPredicated);
8547   setRecipe(I, Recipe);
8548   Plan->addVPValue(I, Recipe);
8549 
  // Determine whether I uses a predicated instruction. If so, I will use that
  // instruction's scalar value, so avoid hoisting the insert-element which
  // packs the scalar value into a vector value; packing happens iff all users
  // use the vector value.
8553   for (auto &Op : I->operands())
8554     if (auto *PredInst = dyn_cast<Instruction>(Op))
8555       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8556         PredInst2Recipe[PredInst]->setAlsoPack(false);
8557 
  // Finalize the recipe for Instr, handling the non-predicated case first.
8559   if (!IsPredicated) {
8560     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8561     VPBB->appendRecipe(Recipe);
8562     return VPBB;
8563   }
8564   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8565   assert(VPBB->getSuccessors().empty() &&
8566          "VPBB has successors when handling predicated replication.");
8567   // Record predicated instructions for above packing optimizations.
8568   PredInst2Recipe[I] = Recipe;
8569   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8570   VPBlockUtils::insertBlockAfter(Region, VPBB);
8571   auto *RegSucc = new VPBasicBlock();
8572   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8573   return RegSucc;
8574 }
8575 
8576 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8577                                                       VPRecipeBase *PredRecipe,
8578                                                       VPlanPtr &Plan) {
8579   // Instructions marked for predication are replicated and placed under an
8580   // if-then construct to prevent side-effects.
8581 
8582   // Generate recipes to compute the block mask for this region.
8583   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8584 
8585   // Build the triangular if-then region.
8586   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8587   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8588   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8589   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8590   auto *PHIRecipe = Instr->getType()->isVoidTy()
8591                         ? nullptr
8592                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8593   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8594   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8595   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8596 
8597   // Note: first set Entry as region entry and then connect successors starting
8598   // from it in order, to propagate the "parent" of each VPBasicBlock.
8599   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8600   VPBlockUtils::connectBlocks(Pred, Exit);
8601 
8602   return Region;
8603 }
8604 
8605 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8606                                                       VFRange &Range,
8607                                                       VPlanPtr &Plan) {
8608   // First, check for specific widening recipes that deal with calls, memory
8609   // operations, inductions and Phi nodes.
8610   if (auto *CI = dyn_cast<CallInst>(Instr))
8611     return tryToWidenCall(CI, Range, *Plan);
8612 
8613   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8614     return tryToWidenMemory(Instr, Range, Plan);
8615 
8616   VPRecipeBase *Recipe;
8617   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8618     if (Phi->getParent() != OrigLoop->getHeader())
8619       return tryToBlend(Phi, Plan);
8620     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8621       return Recipe;
8622 
8623     if (Legal->isReductionVariable(Phi)) {
8624       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8625       VPValue *StartV =
8626           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8627       return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8628     }
8629 
8630     return new VPWidenPHIRecipe(Phi);
8631   }
8632 
8633   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8634                                     cast<TruncInst>(Instr), Range, *Plan)))
8635     return Recipe;
8636 
8637   if (!shouldWiden(Instr, Range))
8638     return nullptr;
8639 
8640   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8641     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8642                                 OrigLoop);
8643 
8644   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8645     bool InvariantCond =
8646         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8647     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8648                                    InvariantCond);
8649   }
8650 
8651   return tryToWiden(Instr, *Plan);
8652 }
8653 
8654 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8655                                                         ElementCount MaxVF) {
8656   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8657 
8658   // Collect instructions from the original loop that will become trivially dead
8659   // in the vectorized loop. We don't need to vectorize these instructions. For
8660   // example, original induction update instructions can become dead because we
8661   // separately emit induction "steps" when generating code for the new loop.
8662   // Similarly, we create a new latch condition when setting up the structure
8663   // of the new loop, so the old one can become dead.
8664   SmallPtrSet<Instruction *, 4> DeadInstructions;
8665   collectTriviallyDeadInstructions(DeadInstructions);
8666 
8667   // Add assume instructions we need to drop to DeadInstructions, to prevent
8668   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8670   // control flow is preserved, we should keep them.
8671   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8672   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8673 
8674   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8675   // Dead instructions do not need sinking. Remove them from SinkAfter.
8676   for (Instruction *I : DeadInstructions)
8677     SinkAfter.erase(I);
8678 
8679   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8680   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8681     VFRange SubRange = {VF, MaxVFPlusOne};
8682     VPlans.push_back(
8683         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8684     VF = SubRange.End;
8685   }
8686 }
8687 
8688 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8689     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8690     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8691 
8692   // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of a vector value.
8695   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8696 
8697   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8698 
8699   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8700 
8701   // ---------------------------------------------------------------------------
8702   // Pre-construction: record ingredients whose recipes we'll need to further
8703   // process after constructing the initial VPlan.
8704   // ---------------------------------------------------------------------------
8705 
8706   // Mark instructions we'll need to sink later and their targets as
8707   // ingredients whose recipe we'll need to record.
8708   for (auto &Entry : SinkAfter) {
8709     RecipeBuilder.recordRecipeOf(Entry.first);
8710     RecipeBuilder.recordRecipeOf(Entry.second);
8711   }
8712   for (auto &Reduction : CM.getInLoopReductionChains()) {
8713     PHINode *Phi = Reduction.first;
8714     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8715     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8716 
8717     RecipeBuilder.recordRecipeOf(Phi);
8718     for (auto &R : ReductionOperations) {
8719       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8721       // need to record the ICmp recipe, so it can be removed later.
8722       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8723         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8724     }
8725   }
8726 
8727   // For each interleave group which is relevant for this (possibly trimmed)
8728   // Range, add it to the set of groups to be later applied to the VPlan and add
8729   // placeholders for its members' Recipes which we'll be replacing with a
8730   // single VPInterleaveRecipe.
8731   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8732     auto applyIG = [IG, this](ElementCount VF) -> bool {
8733       return (VF.isVector() && // Query is illegal for VF == 1
8734               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8735                   LoopVectorizationCostModel::CM_Interleave);
8736     };
8737     if (!getDecisionAndClampRange(applyIG, Range))
8738       continue;
8739     InterleaveGroups.insert(IG);
8740     for (unsigned i = 0; i < IG->getFactor(); i++)
8741       if (Instruction *Member = IG->getMember(i))
8742         RecipeBuilder.recordRecipeOf(Member);
8743   };
8744 
8745   // ---------------------------------------------------------------------------
8746   // Build initial VPlan: Scan the body of the loop in a topological order to
8747   // visit each basic block after having visited its predecessor basic blocks.
8748   // ---------------------------------------------------------------------------
8749 
8750   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8751   auto Plan = std::make_unique<VPlan>();
8752   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8753   Plan->setEntry(VPBB);
8754 
8755   // Scan the body of the loop in a topological order to visit each basic block
8756   // after having visited its predecessor basic blocks.
8757   LoopBlocksDFS DFS(OrigLoop);
8758   DFS.perform(LI);
8759 
8760   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8761     // Relevant instructions from basic block BB will be grouped into VPRecipe
8762     // ingredients and fill a new VPBasicBlock.
8763     unsigned VPBBsForBB = 0;
8764     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8765     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8766     VPBB = FirstVPBBForBB;
8767     Builder.setInsertPoint(VPBB);
8768 
8769     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8771     for (Instruction &I : BB->instructionsWithoutDebug()) {
8772       Instruction *Instr = &I;
8773 
8774       // First filter out irrelevant instructions, to ensure no recipes are
8775       // built for them.
8776       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8777         continue;
8778 
8779       if (auto Recipe =
8780               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8781         for (auto *Def : Recipe->definedValues()) {
8782           auto *UV = Def->getUnderlyingValue();
8783           Plan->addVPValue(UV, Def);
8784         }
8785 
8786         RecipeBuilder.setRecipe(Instr, Recipe);
8787         VPBB->appendRecipe(Recipe);
8788         continue;
8789       }
8790 
8791       // Otherwise, if all widening options failed, Instruction is to be
8792       // replicated. This may create a successor for VPBB.
8793       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8794           Instr, Range, VPBB, PredInst2Recipe, Plan);
8795       if (NextVPBB != VPBB) {
8796         VPBB = NextVPBB;
8797         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8798                                     : "");
8799       }
8800     }
8801   }
8802 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
8806   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8807   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8808   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8809   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8810   delete PreEntry;
8811 
8812   // ---------------------------------------------------------------------------
8813   // Transform initial VPlan: Apply previously taken decisions, in order, to
8814   // bring the VPlan to its final state.
8815   // ---------------------------------------------------------------------------
8816 
8817   // Apply Sink-After legal constraints.
8818   for (auto &Entry : SinkAfter) {
8819     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8820     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8821     // If the target is in a replication region, make sure to move Sink to the
8822     // block after it, not into the replication region itself.
8823     if (auto *Region =
8824             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8825       if (Region->isReplicator()) {
8826         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8827         VPBasicBlock *NextBlock =
8828             cast<VPBasicBlock>(Region->getSuccessors().front());
8829         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8830         continue;
8831       }
8832     }
8833     Sink->moveAfter(Target);
8834   }
8835 
8836   // Interleave memory: for each Interleave Group we marked earlier as relevant
8837   // for this VPlan, replace the Recipes widening its memory instructions with a
8838   // single VPInterleaveRecipe at its insertion point.
8839   for (auto IG : InterleaveGroups) {
8840     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8841         RecipeBuilder.getRecipe(IG->getInsertPos()));
8842     SmallVector<VPValue *, 4> StoredValues;
8843     for (unsigned i = 0; i < IG->getFactor(); ++i)
8844       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8845         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8846 
8847     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8848                                         Recipe->getMask());
8849     VPIG->insertBefore(Recipe);
8850     unsigned J = 0;
8851     for (unsigned i = 0; i < IG->getFactor(); ++i)
8852       if (Instruction *Member = IG->getMember(i)) {
8853         if (!Member->getType()->isVoidTy()) {
8854           VPValue *OriginalV = Plan->getVPValue(Member);
8855           Plan->removeVPValueFor(Member);
8856           Plan->addVPValue(Member, VPIG->getVPValue(J));
8857           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8858           J++;
8859         }
8860         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8861       }
8862   }
8863 
8864   // Adjust the recipes for any inloop reductions.
8865   if (Range.Start.isVector())
8866     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8867 
8868   // Finally, if tail is folded by masking, introduce selects between the phi
8869   // and the live-out instruction of each reduction, at the end of the latch.
8870   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8871     Builder.setInsertPoint(VPBB);
8872     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8873     for (auto &Reduction : Legal->getReductionVars()) {
8874       if (CM.isInLoopReduction(Reduction.first))
8875         continue;
8876       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
8877       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
8878       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
8879     }
8880   }
8881 
8882   std::string PlanName;
8883   raw_string_ostream RSO(PlanName);
8884   ElementCount VF = Range.Start;
8885   Plan->addVF(VF);
8886   RSO << "Initial VPlan for VF={" << VF;
8887   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8888     Plan->addVF(VF);
8889     RSO << "," << VF;
8890   }
8891   RSO << "},UF>=1";
8892   RSO.flush();
8893   Plan->setName(PlanName);
8894 
8895   return Plan;
8896 }
8897 
8898 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8899   // Outer loop handling: They may require CFG and instruction level
8900   // transformations before even evaluating whether vectorization is profitable.
8901   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8902   // the vectorization pipeline.
8903   assert(!OrigLoop->isInnermost());
8904   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8905 
8906   // Create new empty VPlan
8907   auto Plan = std::make_unique<VPlan>();
8908 
8909   // Build hierarchical CFG
8910   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8911   HCFGBuilder.buildHierarchicalCFG();
8912 
8913   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8914        VF *= 2)
8915     Plan->addVF(VF);
8916 
8917   if (EnableVPlanPredication) {
8918     VPlanPredicator VPP(*Plan);
8919     VPP.predicate();
8920 
8921     // Avoid running transformation to recipes until masked code generation in
8922     // VPlan-native path is in place.
8923     return Plan;
8924   }
8925 
8926   SmallPtrSet<Instruction *, 1> DeadInstructions;
8927   VPlanTransforms::VPInstructionsToVPRecipes(
8928       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8929   return Plan;
8930 }
8931 
8932 // Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
8934 // reductions, with one operand being vector and the other being the scalar
8935 // reduction chain.
8936 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8937     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8938   for (auto &Reduction : CM.getInLoopReductionChains()) {
8939     PHINode *Phi = Reduction.first;
8940     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8941     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8942 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
8945     // which of the two operands will remain scalar and which will be reduced.
8946     // For minmax the chain will be the select instructions.
8947     Instruction *Chain = Phi;
8948     for (Instruction *R : ReductionOperations) {
8949       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8950       RecurKind Kind = RdxDesc.getRecurrenceKind();
8951 
8952       VPValue *ChainOp = Plan->getVPValue(Chain);
8953       unsigned FirstOpId;
8954       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8955         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8956                "Expected to replace a VPWidenSelectSC");
8957         FirstOpId = 1;
8958       } else {
8959         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8960                "Expected to replace a VPWidenSC");
8961         FirstOpId = 0;
8962       }
8963       unsigned VecOpId =
8964           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8965       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8966 
8967       auto *CondOp = CM.foldTailByMasking()
8968                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8969                          : nullptr;
8970       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8971           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8972       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8973       Plan->removeVPValueFor(R);
8974       Plan->addVPValue(R, RedRecipe);
8975       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8977       WidenRecipe->eraseFromParent();
8978 
8979       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8980         VPRecipeBase *CompareRecipe =
8981             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8982         assert(isa<VPWidenRecipe>(CompareRecipe) &&
8983                "Expected to replace a VPWidenSC");
8984         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
8985                "Expected no remaining users");
8986         CompareRecipe->eraseFromParent();
8987       }
8988       Chain = R;
8989     }
8990   }
8991 }
8992 
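// VPCallbackILV forwards value queries from VPlan execution to the
// InnerLoopVectorizer's value maps.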
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
8997 
8998 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
8999     Value *V, const VPIteration &Instance) {
9000   return ILV.getOrCreateScalarValue(V, Instance);
9001 }
9002 
9003 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9004                                VPSlotTracker &SlotTracker) const {
9005   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9006   IG->getInsertPos()->printAsOperand(O, false);
9007   O << ", ";
9008   getAddr()->printAsOperand(O, SlotTracker);
9009   VPValue *Mask = getMask();
9010   if (Mask) {
9011     O << ", ";
9012     Mask->printAsOperand(O, SlotTracker);
9013   }
9014   for (unsigned i = 0; i < IG->getFactor(); ++i)
9015     if (Instruction *I = IG->getMember(i))
9016       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
9017 }
9018 
9019 void VPWidenCallRecipe::execute(VPTransformState &State) {
9020   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9021                                   *this, State);
9022 }
9023 
9024 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9025   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9026                                     this, *this, InvariantCond, State);
9027 }
9028 
9029 void VPWidenRecipe::execute(VPTransformState &State) {
9030   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9031 }
9032 
9033 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9034   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9035                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9036                       IsIndexLoopInvariant, State);
9037 }
9038 
9039 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9040   assert(!State.Instance && "Int or FP induction being replicated.");
9041   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9042                                    getTruncInst(), getVPValue(0),
9043                                    getCastValue(), State);
9044 }
9045 
9046 void VPWidenPHIRecipe::execute(VPTransformState &State) {
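  // Only reduction phis carry a start value; for other widened phis StartV is
  // null.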
9047   Value *StartV =
9048       getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
9049   State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF);
9050 }
9051 
9052 void VPBlendRecipe::execute(VPTransformState &State) {
9053   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9054   // We know that all PHIs in non-header blocks are converted into
9055   // selects, so we don't have to worry about the insertion order and we
9056   // can just use the builder.
9057   // At this point we generate the predication tree. There may be
9058   // duplications since this is a simple recursive scan, but future
9059   // optimizations will clean it up.
9060 
9061   unsigned NumIncoming = getNumIncomingValues();
9062 
9063   // Generate a sequence of selects of the form:
9064   // SELECT(Mask3, In3,
9065   //        SELECT(Mask2, In2,
9066   //               SELECT(Mask1, In1,
9067   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9070   InnerLoopVectorizer::VectorParts Entry(State.UF);
9071   for (unsigned In = 0; In < NumIncoming; ++In) {
9072     for (unsigned Part = 0; Part < State.UF; ++Part) {
9073       // We might have single edge PHIs (blocks) - use an identity
9074       // 'select' for the first PHI operand.
9075       Value *In0 = State.get(getIncomingValue(In), Part);
9076       if (In == 0)
9077         Entry[Part] = In0; // Initialize with the first incoming value.
9078       else {
9079         // Select between the current value and the previous incoming edge
9080         // based on the incoming mask.
9081         Value *Cond = State.get(getMask(In), Part);
9082         Entry[Part] =
9083             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9084       }
9085     }
9086   }
9087   for (unsigned Part = 0; Part < State.UF; ++Part)
9088     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
9089 }
9090 
9091 void VPInterleaveRecipe::execute(VPTransformState &State) {
9092   assert(!State.Instance && "Interleave group being replicated.");
9093   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9094                                       getStoredValues(), getMask());
9095 }
9096 
9097 void VPReductionRecipe::execute(VPTransformState &State) {
9098   assert(!State.Instance && "Reduction being replicated.");
9099   for (unsigned Part = 0; Part < State.UF; ++Part) {
9100     RecurKind Kind = RdxDesc->getRecurrenceKind();
9101     Value *NewVecOp = State.get(getVecOp(), Part);
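    // When tail folding provides a condition mask, blend the vector operand
    // with the reduction identity so that masked-out lanes do not contribute
    // to the reduction.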
9102     if (VPValue *Cond = getCondOp()) {
9103       Value *NewCond = State.get(Cond, Part);
9104       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9105       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9106           Kind, VecTy->getElementType());
9107       Constant *IdenVec =
9108           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9109       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9110       NewVecOp = Select;
9111     }
9112     Value *NewRed =
9113         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9114     Value *PrevInChain = State.get(getChainOp(), Part);
9115     Value *NextInChain;
9116     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9117       NextInChain =
9118           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9119                          NewRed, PrevInChain);
9120     } else {
9121       NextInChain = State.Builder.CreateBinOp(
9122           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9123           PrevInChain);
9124     }
9125     State.set(this, getUnderlyingInstr(), NextInChain, Part);
9126   }
9127 }
9128 
9129 void VPReplicateRecipe::execute(VPTransformState &State) {
9130   if (State.Instance) { // Generate a single instance.
9131     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9132     State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9133                                     *State.Instance, IsPredicated, State);
9134     // Insert scalar instance packing it into a vector.
9135     if (AlsoPack && State.VF.isVector()) {
9136       // If we're constructing lane 0, initialize to start from poison.
9137       if (State.Instance->Lane == 0) {
9138         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9139         Value *Poison = PoisonValue::get(
9140             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9141         State.ValueMap.setVectorValue(getUnderlyingInstr(),
9142                                       State.Instance->Part, Poison);
9143       }
9144       State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(),
9145                                            *State.Instance);
9146     }
9147     return;
9148   }
9149 
9150   // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9153   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9154   assert((!State.VF.isScalable() || IsUniform) &&
9155          "Can't scalarize a scalable vector");
9156   for (unsigned Part = 0; Part < State.UF; ++Part)
9157     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9158       State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9159                                       VPIteration(Part, Lane), IsPredicated,
9160                                       State);
9161 }
9162 
9163 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9164   assert(State.Instance && "Branch on Mask works only on single instance.");
9165 
9166   unsigned Part = State.Instance->Part;
9167   unsigned Lane = State.Instance->Lane;
9168 
9169   Value *ConditionBit = nullptr;
9170   VPValue *BlockInMask = getMask();
9171   if (BlockInMask) {
9172     ConditionBit = State.get(BlockInMask, Part);
9173     if (ConditionBit->getType()->isVectorTy())
9174       ConditionBit = State.Builder.CreateExtractElement(
9175           ConditionBit, State.Builder.getInt32(Lane));
9176   } else // Block in mask is all-one.
9177     ConditionBit = State.Builder.getTrue();
9178 
9179   // Replace the temporary unreachable terminator with a new conditional branch,
9180   // whose two destinations will be set later when they are created.
9181   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9182   assert(isa<UnreachableInst>(CurrentTerminator) &&
9183          "Expected to replace unreachable terminator with conditional branch.");
9184   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9185   CondBr->setSuccessor(0, nullptr);
9186   ReplaceInstWithInst(CurrentTerminator, CondBr);
9187 }
9188 
9189 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9190   assert(State.Instance && "Predicated instruction PHI works per instance.");
9191   Instruction *ScalarPredInst =
9192       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9193   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9194   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9195   assert(PredicatingBB && "Predicated block has no single predecessor.");
9196 
9197   // By current pack/unpack logic we need to generate only a single phi node: if
9198   // a vector value for the predicated instruction exists at this point it means
9199   // the instruction has vector users only, and a phi for the vector value is
9200   // needed. In this case the recipe of the predicated instruction is marked to
9201   // also do that packing, thereby "hoisting" the insert-element sequence.
9202   // Otherwise, a phi node for the scalar value is needed.
9203   unsigned Part = State.Instance->Part;
9204   Instruction *PredInst =
9205       cast<Instruction>(getOperand(0)->getUnderlyingValue());
9206   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
9207     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
9208     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9209     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9210     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9211     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9212     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
9213   } else {
9214     Type *PredInstType = PredInst->getType();
9215     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9216     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB);
9217     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9218     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
9219   }
9220 }
9221 
9222 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
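  // For a load, this recipe's VPValue is the defined result; for a store there
  // is no defined value and the stored value is passed instead.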
9223   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9224   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9225                                         StoredValue ? nullptr : getVPValue(),
9226                                         getAddr(), StoredValue, getMask());
9227 }
9228 
9229 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9230 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9231 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9232 // for predication.
9233 static ScalarEpilogueLowering getScalarEpilogueLowering(
9234     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9235     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9236     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9237     LoopVectorizationLegality &LVL) {
9238   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9239   // don't look at hints or options, and don't request a scalar epilogue.
9240   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9241   // LoopAccessInfo (due to code dependency and not being able to reliably get
9242   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9243   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9244   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9245   // back to the old way and vectorize with versioning when forced. See D81345.)
9246   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9247                                                       PGSOQueryType::IRPass) &&
9248                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9249     return CM_ScalarEpilogueNotAllowedOptSize;
9250 
9251   // 2) If set, obey the directives
9252   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9253     switch (PreferPredicateOverEpilogue) {
9254     case PreferPredicateTy::ScalarEpilogue:
9255       return CM_ScalarEpilogueAllowed;
9256     case PreferPredicateTy::PredicateElseScalarEpilogue:
9257       return CM_ScalarEpilogueNotNeededUsePredicate;
9258     case PreferPredicateTy::PredicateOrDontVectorize:
9259       return CM_ScalarEpilogueNotAllowedUsePredicate;
9260     };
9261   }
9262 
9263   // 3) If set, obey the hints
9264   switch (Hints.getPredicate()) {
9265   case LoopVectorizeHints::FK_Enabled:
9266     return CM_ScalarEpilogueNotNeededUsePredicate;
9267   case LoopVectorizeHints::FK_Disabled:
9268     return CM_ScalarEpilogueAllowed;
9269   };
9270 
9271   // 4) if the TTI hook indicates this is profitable, request predication.
9272   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9273                                        LVL.getLAI()))
9274     return CM_ScalarEpilogueNotNeededUsePredicate;
9275 
9276   return CM_ScalarEpilogueAllowed;
9277 }
9278 
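// These setters record generated values both in the VPValue-based mapping and
// in the legacy ILV value maps, keeping the two in sync while recipes migrate
// to VPValues.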
9279 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9280                            const VPIteration &Instance) {
9281   set(Def, V, Instance);
9282   ILV->setScalarValue(IRDef, Instance, V);
9283 }
9284 
9285 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9286                            unsigned Part) {
9287   set(Def, V, Part);
9288   ILV->setVectorValue(IRDef, Part, V);
9289 }
9290 
9291 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9293   if (hasVectorValue(Def, Part))
9294     return Data.PerPartOutput[Def][Part];
9295 
9296   // TODO: Remove the callback once all scalar recipes are managed using
9297   // VPValues.
9298   if (!hasScalarValue(Def, {Part, 0}))
9299     return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part);
9300 
9301   Value *ScalarValue = get(Def, {Part, 0});
9302   // If we aren't vectorizing, we can just copy the scalar map values over
9303   // to the vector map.
9304   if (VF.isScalar()) {
9305     set(Def, ScalarValue, Part);
9306     return ScalarValue;
9307   }
9308 
9309   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9310   bool IsUniform = RepR && RepR->isUniform();
9311 
9312   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9313   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9314 
9315   // Set the insert point after the last scalarized instruction. This
9316   // ensures the insertelement sequence will directly follow the scalar
9317   // definitions.
9318   auto OldIP = Builder.saveIP();
9319   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9320   Builder.SetInsertPoint(&*NewIP);
9321 
9322   // However, if we are vectorizing, we need to construct the vector values.
9323   // If the value is known to be uniform after vectorization, we can just
9324   // broadcast the scalar value corresponding to lane zero for each unroll
9325   // iteration. Otherwise, we construct the vector values using
9326   // insertelement instructions. Since the resulting vectors are stored in
9327   // VectorLoopValueMap, we will only generate the insertelements once.
9328   Value *VectorValue = nullptr;
9329   if (IsUniform) {
9330     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9331     set(Def, VectorValue, Part);
9332   } else {
9333     // Initialize packing with insertelements to start from undef.
9334     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
9335     Value *Undef = UndefValue::get(VectorType::get(LastInst->getType(), VF));
9336     set(Def, Undef, Part);
9337     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9338       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9339     VectorValue = get(Def, Part);
9340   }
9341   Builder.restoreIP(OldIP);
9342   return VectorValue;
9343 }
9344 
9345 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which makes it possible to
// apply VPlan-to-VPlan transformations from the very beginning without
// modifying the input LLVM IR.
9349 static bool processLoopInVPlanNativePath(
9350     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9351     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9352     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9353     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9354     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9355 
9356   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9357     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9358     return false;
9359   }
9360   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9361   Function *F = L->getHeader()->getParent();
9362   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9363 
9364   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9365       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9366 
9367   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9368                                 &Hints, IAI);
9369   // Use the planner for outer loop vectorization.
9370   // TODO: CM is not used at this point inside the planner. Turn CM into an
9371   // optional argument if we don't need it in the future.
9372   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9373 
9374   // Get user vectorization factor.
9375   ElementCount UserVF = Hints.getWidth();
9376 
9377   // Plan how to best vectorize, return the best VF and its cost.
9378   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9379 
9380   // If we are stress testing VPlan builds, do not attempt to generate vector
9381   // code. Masked vector code generation support will follow soon.
9382   // Also, do not attempt to vectorize if no vector code will be produced.
9383   if (VPlanBuildStressTest || EnableVPlanPredication ||
9384       VectorizationFactor::Disabled() == VF)
9385     return false;
9386 
9387   LVP.setBestPlan(VF.Width, 1);
9388 
9389   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9390                          &CM, BFI, PSI);
9391   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9392                     << L->getHeader()->getParent()->getName() << "\"\n");
9393   LVP.executePlan(LB, DT);
9394 
9395   // Mark the loop as already vectorized to avoid vectorizing again.
9396   Hints.setAlreadyVectorized();
9397 
9398   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9399   return true;
9400 }
9401 
9402 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9403     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9404                                !EnableLoopInterleaving),
9405       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9406                               !EnableLoopVectorization) {}
9407 
9408 bool LoopVectorizePass::processLoop(Loop *L) {
9409   assert((EnableVPlanNativePath || L->isInnermost()) &&
9410          "VPlan-native path is not enabled. Only process inner loops.");
9411 
9412 #ifndef NDEBUG
9413   const std::string DebugLocStr = getDebugLocString(L);
9414 #endif /* NDEBUG */
9415 
9416   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9417                     << L->getHeader()->getParent()->getName() << "\" from "
9418                     << DebugLocStr << "\n");
9419 
9420   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9421 
9422   LLVM_DEBUG(
9423       dbgs() << "LV: Loop hints:"
9424              << " force="
9425              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9426                      ? "disabled"
9427                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9428                             ? "enabled"
9429                             : "?"))
9430              << " width=" << Hints.getWidth()
9431              << " unroll=" << Hints.getInterleave() << "\n");
9432 
9433   // Function containing loop
9434   Function *F = L->getHeader()->getParent();
9435 
9436   // Looking at the diagnostic output is the only way to determine if a loop
9437   // was vectorized (other than looking at the IR or machine code), so it
9438   // is important to generate an optimization remark for each loop. Most of
9439   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
9443 
9444   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9445     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9446     return false;
9447   }
9448 
9449   PredicatedScalarEvolution PSE(*SE, *L);
9450 
9451   // Check if it is legal to vectorize the loop.
9452   LoopVectorizationRequirements Requirements(*ORE);
9453   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9454                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9455   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9456     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9457     Hints.emitRemarkWithHints();
9458     return false;
9459   }
9460 
9461   // Check the function attributes and profiles to find out if this function
9462   // should be optimized for size.
9463   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9464       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9465 
9466   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9467   // here. They may require CFG and instruction level transformations before
9468   // even evaluating whether vectorization is profitable. Since we cannot modify
9469   // the incoming IR, we need to build VPlan upfront in the vectorization
9470   // pipeline.
9471   if (!L->isInnermost())
9472     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9473                                         ORE, BFI, PSI, Hints);
9474 
9475   assert(L->isInnermost() && "Inner loop expected.");
9476 
9477   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9478   // count by optimizing for size, to minimize overheads.
9479   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9480   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9481     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9482                       << "This loop is worth vectorizing only if no scalar "
9483                       << "iteration overheads are incurred.");
9484     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9485       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9486     else {
9487       LLVM_DEBUG(dbgs() << "\n");
9488       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9489     }
9490   }
9491 
9492   // Check the function attributes to see if implicit floats are allowed.
9493   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9494   // an integer loop and the vector instructions selected are purely integer
9495   // vector instructions?
9496   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9497     reportVectorizationFailure(
9498         "Can't vectorize when the NoImplicitFloat attribute is used",
9499         "loop not vectorized due to NoImplicitFloat attribute",
9500         "NoImplicitFloat", ORE, L);
9501     Hints.emitRemarkWithHints();
9502     return false;
9503   }
9504 
9505   // Check if the target supports potentially unsafe FP vectorization.
9506   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9507   // for the target we're vectorizing for, to make sure none of the
9508   // additional fp-math flags can help.
9509   if (Hints.isPotentiallyUnsafe() &&
9510       TTI->isFPVectorizationPotentiallyUnsafe()) {
9511     reportVectorizationFailure(
9512         "Potentially unsafe FP op prevents vectorization",
9513         "loop not vectorized due to unsafe FP support.",
9514         "UnsafeFP", ORE, L);
9515     Hints.emitRemarkWithHints();
9516     return false;
9517   }
9518 
9519   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9520   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9521 
9522   // If an override option has been passed in for interleaved accesses, use it.
9523   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9524     UseInterleaved = EnableInterleavedMemAccesses;
9525 
9526   // Analyze interleaved memory accesses.
9527   if (UseInterleaved) {
9528     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9529   }
9530 
9531   // Use the cost model.
9532   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9533                                 F, &Hints, IAI);
9534   CM.collectValuesToIgnore();
9535 
9536   // Use the planner for vectorization.
9537   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
9538 
9539   // Get user vectorization factor and interleave count.
9540   ElementCount UserVF = Hints.getWidth();
9541   unsigned UserIC = Hints.getInterleave();
9542 
9543   // Plan how to best vectorize, return the best VF and its cost.
9544   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9545 
9546   VectorizationFactor VF = VectorizationFactor::Disabled();
9547   unsigned IC = 1;
9548 
9549   if (MaybeVF) {
9550     VF = *MaybeVF;
9551     // Select the interleave count.
9552     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9553   }
9554 
9555   // Identify the diagnostic messages that should be produced.
9556   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9557   bool VectorizeLoop = true, InterleaveLoop = true;
9558   if (Requirements.doesNotMeet(F, L, Hints)) {
9559     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
9560                          "requirements.\n");
9561     Hints.emitRemarkWithHints();
9562     return false;
9563   }
9564 
9565   if (VF.Width.isScalar()) {
9566     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9567     VecDiagMsg = std::make_pair(
9568         "VectorizationNotBeneficial",
9569         "the cost-model indicates that vectorization is not beneficial");
9570     VectorizeLoop = false;
9571   }
9572 
9573   if (!MaybeVF && UserIC > 1) {
9574     // Tell the user interleaving was avoided up-front, despite being explicitly
9575     // requested.
9576     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9577                          "interleaving should be avoided up front\n");
9578     IntDiagMsg = std::make_pair(
9579         "InterleavingAvoided",
9580         "Ignoring UserIC, because interleaving was avoided up front");
9581     InterleaveLoop = false;
9582   } else if (IC == 1 && UserIC <= 1) {
9583     // Tell the user interleaving is not beneficial.
9584     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9585     IntDiagMsg = std::make_pair(
9586         "InterleavingNotBeneficial",
9587         "the cost-model indicates that interleaving is not beneficial");
9588     InterleaveLoop = false;
9589     if (UserIC == 1) {
9590       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9591       IntDiagMsg.second +=
9592           " and is explicitly disabled or interleave count is set to 1";
9593     }
9594   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
9596     LLVM_DEBUG(
9597         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9598     IntDiagMsg = std::make_pair(
9599         "InterleavingBeneficialButDisabled",
9600         "the cost-model indicates that interleaving is beneficial "
9601         "but is explicitly disabled or interleave count is set to 1");
9602     InterleaveLoop = false;
9603   }
9604 
9605   // Override IC if user provided an interleave count.
9606   IC = UserIC > 0 ? UserIC : IC;
9607 
9608   // Emit diagnostic messages, if any.
9609   const char *VAPassName = Hints.vectorizeAnalysisPassName();
9610   if (!VectorizeLoop && !InterleaveLoop) {
9611     // Do not vectorize or interleaving the loop.
9612     ORE->emit([&]() {
9613       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9614                                       L->getStartLoc(), L->getHeader())
9615              << VecDiagMsg.second;
9616     });
9617     ORE->emit([&]() {
9618       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9619                                       L->getStartLoc(), L->getHeader())
9620              << IntDiagMsg.second;
9621     });
9622     return false;
9623   } else if (!VectorizeLoop && InterleaveLoop) {
9624     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9625     ORE->emit([&]() {
9626       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9627                                         L->getStartLoc(), L->getHeader())
9628              << VecDiagMsg.second;
9629     });
9630   } else if (VectorizeLoop && !InterleaveLoop) {
9631     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9632                       << ") in " << DebugLocStr << '\n');
9633     ORE->emit([&]() {
9634       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9635                                         L->getStartLoc(), L->getHeader())
9636              << IntDiagMsg.second;
9637     });
9638   } else if (VectorizeLoop && InterleaveLoop) {
9639     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9640                       << ") in " << DebugLocStr << '\n');
9641     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9642   }
9643 
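  // Commit to the chosen VF and interleave count; the planner keeps only the
  // VPlan that covers this VF.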
  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
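  // Set when no runtime safety checks are emitted; the scalar remainder loop
  // is then marked with llvm.loop.unroll.runtime.disable metadata below.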
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If the cost model decided that vectorizing the loop is not worthwhile,
    // interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is both legal and worthwhile to vectorize the
    // loop, then do it.

    // Consider vectorizing the epilogue too if it's profitable.
    VectorizationFactor EpilogueVF =
        CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
    if (EpilogueVF.Width.isVector()) {
      // The first pass vectorizes the main loop and creates a scalar epilogue
      // to be vectorized by executing the plan (potentially with a different
      // factor) again shortly afterwards.
      EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                        EpilogueVF.Width.getKnownMinValue(), 1);
      EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
                                         &LVL, &CM, BFI, PSI);

      LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
      LVP.executePlan(MainILV, DT);
      ++LoopsVectorized;
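      // Put the scalar loop left behind by the first pass back into simplified
      // and LCSSA form before the epilogue pass vectorizes it.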
      simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
      formLCSSARecursively(*L, *DT, LI, SE);

      // Second pass vectorizes the epilogue and adjusts the control flow
      // edges from the first pass.
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
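      // The epilogue vectorizer is configured from EPI.MainLoopVF/MainLoopUF,
      // so redirect those fields to the epilogue factors before constructing
      // it.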
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks for strides and memory. A scalar loop
      // that is rarely executed is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }
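  // Honor llvm.loop.vectorize.followup_all / followup_epilogue metadata from
  // the original loop, if present, as the loop ID of the remaining scalar
  // loop; otherwise fall back to the default annotations below.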
  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
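  // MemorySSA is only computed when EnableMSSALoopDependency is set; it is
  // threaded into LoopStandardAnalysisResults for LoopAccessAnalysis below.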
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
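  // Request LoopAccessInfo lazily, per loop, through the inner loop analysis
  // manager.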
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loop info or dominator analyses with outer
  // loop vectorization. Until this is addressed, mark these analyses as
  // preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}