1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
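//
// For illustration only, a conceptual sketch of the transformation for a
// hypothetical loop at a vector width of 4 (pseudo-code, not the IR this
// pass emits):
//
//   // original scalar loop
//   for (i = 0; i < n; i++)
//     C[i] = A[i] + B[i];
//
//   // after vectorization: each 'wide' iteration processes 4 elements and
//   // the index advances by the vector width; leftover iterations run in a
//   // scalar epilogue loop.
//   for (i = 0; i + 4 <= n; i += 4)
//     C[i..i+3] = A[i..i+3] + B[i..i+3];   // one SIMD add
//   for (; i < n; i++)
//     C[i] = A[i] + B[i];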
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
202 // that predication is preferred, and lists the available options. I.e., the
203 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
204 // and predicate the instructions accordingly. If tail-folding fails, there are
205 // different fallback strategies depending on these values:
206 namespace PreferPredicateTy {
207   enum Option {
208     ScalarEpilogue = 0,
209     PredicateElseScalarEpilogue,
210     PredicateOrDontVectorize
211   };
212 } // namespace PreferPredicateTy
213 
214 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
215     "prefer-predicate-over-epilogue",
216     cl::init(PreferPredicateTy::ScalarEpilogue),
217     cl::Hidden,
218     cl::desc("Tail-folding and predication preferences over creating a scalar "
219              "epilogue loop."),
220     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
221                          "scalar-epilogue",
222                          "Don't tail-predicate loops, create scalar epilogue"),
223               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
224                          "predicate-else-scalar-epilogue",
225                          "Prefer tail-folding, create scalar epilogue if tail "
226                          "folding fails."),
227               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
228                          "predicate-dont-vectorize",
229                          "Prefer tail-folding, don't attempt vectorization if "
230                          "tail-folding fails.")));
231 
232 static cl::opt<bool> MaximizeBandwidth(
233     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
234     cl::desc("Maximize bandwidth when selecting the vectorization factor, which "
235              "will be determined by the smallest type in the loop."));
236 
237 static cl::opt<bool> EnableInterleavedMemAccesses(
238     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
239     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
240 
241 /// An interleave-group may need masking if it resides in a block that needs
242 /// predication, or in order to mask away gaps.
243 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
244     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
245     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
246 
247 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
248     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
249     cl::desc("We don't interleave loops with an estimated constant trip count "
250              "below this number"));
251 
252 static cl::opt<unsigned> ForceTargetNumScalarRegs(
253     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
254     cl::desc("A flag that overrides the target's number of scalar registers."));
255 
256 static cl::opt<unsigned> ForceTargetNumVectorRegs(
257     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of vector registers."));
259 
260 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
261     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's max interleave factor for "
263              "scalar loops."));
264 
265 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
266     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "vectorized loops."));
269 
270 static cl::opt<unsigned> ForceTargetInstructionCost(
271     "force-target-instruction-cost", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's expected cost for "
273              "an instruction to a single constant value. Mostly "
274              "useful for getting consistent testing."));
275 
276 static cl::opt<bool> ForceTargetSupportsScalableVectors(
277     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
278     cl::desc(
279         "Pretend that scalable vectors are supported, even if the target does "
280         "not support them. This flag should only be used for testing."));
281 
282 static cl::opt<unsigned> SmallLoopCost(
283     "small-loop-cost", cl::init(20), cl::Hidden,
284     cl::desc(
285         "The cost of a loop that is considered 'small' by the interleaver."));
286 
287 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
288     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
289     cl::desc("Enable the use of the block frequency analysis to access PGO "
290              "heuristics minimizing code growth in cold regions and being more "
291              "aggressive in hot regions."));
292 
293 // Runtime interleave loops for load/store throughput.
294 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
295     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
296     cl::desc(
297         "Enable runtime interleaving until load/store ports are saturated"));
298 
299 /// Interleave small loops with scalar reductions.
300 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
301     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
302     cl::desc("Enable interleaving for loops with small iteration counts that "
303              "contain scalar reductions to expose ILP."));
304 
305 /// The number of stores in a loop that are allowed to need predication.
306 static cl::opt<unsigned> NumberOfStoresToPredicate(
307     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
308     cl::desc("Max number of stores to be predicated behind an if."));
309 
310 static cl::opt<bool> EnableIndVarRegisterHeur(
311     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
312     cl::desc("Count the induction variable only once when interleaving"));
313 
314 static cl::opt<bool> EnableCondStoresVectorization(
315     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
316     cl::desc("Enable if-predication of stores during vectorization."));
317 
318 static cl::opt<unsigned> MaxNestedScalarReductionIC(
319     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
320     cl::desc("The maximum interleave count to use when interleaving a scalar "
321              "reduction in a nested loop."));
322 
323 static cl::opt<bool>
324     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
325                            cl::Hidden,
326                            cl::desc("Prefer in-loop vector reductions, "
327                                     "overriding the target's preference."));
328 
329 static cl::opt<bool> PreferPredicatedReductionSelect(
330     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
331     cl::desc(
332         "Prefer predicating a reduction operation over an after-loop select."));
333 
334 cl::opt<bool> EnableVPlanNativePath(
335     "enable-vplan-native-path", cl::init(false), cl::Hidden,
336     cl::desc("Enable VPlan-native vectorization path with "
337              "support for outer loop vectorization."));
338 
339 // FIXME: Remove this switch once we have divergence analysis. Currently we
340 // assume divergent non-backedge branches when this switch is true.
341 cl::opt<bool> EnableVPlanPredication(
342     "enable-vplan-predication", cl::init(false), cl::Hidden,
343     cl::desc("Enable VPlan-native vectorization path predicator with "
344              "support for outer loop vectorization."));
345 
346 // This flag enables the stress testing of the VPlan H-CFG construction in the
347 // VPlan-native vectorization path. It must be used in conjunction with
348 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
349 // verification of the H-CFGs built.
350 static cl::opt<bool> VPlanBuildStressTest(
351     "vplan-build-stress-test", cl::init(false), cl::Hidden,
352     cl::desc(
353         "Build VPlan for every supported loop nest in the function and bail "
354         "out right after the build (stress test the VPlan H-CFG construction "
355         "in the VPlan-native vectorization path)."));
356 
357 cl::opt<bool> llvm::EnableLoopInterleaving(
358     "interleave-loops", cl::init(true), cl::Hidden,
359     cl::desc("Enable loop interleaving in loop vectorization passes"));
360 cl::opt<bool> llvm::EnableLoopVectorization(
361     "vectorize-loops", cl::init(true), cl::Hidden,
362     cl::desc("Run the loop vectorization passes"));
363 
364 /// A helper function that returns the type of a loaded or stored value.
365 static Type *getMemInstValueType(Value *I) {
366   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
367          "Expected Load or Store instruction");
368   if (auto *LI = dyn_cast<LoadInst>(I))
369     return LI->getType();
370   return cast<StoreInst>(I)->getValueOperand()->getType();
371 }
372 
373 /// A helper function that returns true if the given type is irregular. The
374 /// type is irregular if its allocated size doesn't equal the store size of an
375 /// element of the corresponding vector type at the given vectorization factor.
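///
/// For illustration only (the actual sizes depend on the DataLayout): with a
/// typical x86-64 layout, i32 is regular because its 32-bit allocation size
/// matches the 32-bit store size of an element of <VF x i32>, whereas
/// x86_fp80 is irregular because each element is allocated 128 bits but only
/// stores 80, so an array of VF elements is not bitcast-compatible with
/// <VF x x86_fp80>.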
376 static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
377   // Determine if an array of VF elements of type Ty is "bitcast compatible"
378   // with a <VF x Ty> vector.
379   if (VF.isVector()) {
380     auto *VectorTy = VectorType::get(Ty, VF);
381     return TypeSize::get(VF.getKnownMinValue() *
382                              DL.getTypeAllocSize(Ty).getFixedValue(),
383                          VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
384   }
385 
386   // If the vectorization factor is one, we just check if an array of type Ty
387   // requires padding between elements.
388   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
389 }
390 
391 /// A helper function that returns the reciprocal of the block probability of
392 /// predicated blocks. If we return X, we are assuming the predicated block
393 /// will execute once for every X iterations of the loop header.
394 ///
395 /// TODO: We should use actual block probability here, if available. Currently,
396 ///       we always assume predicated blocks have a 50% chance of executing.
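///
/// For illustration only, a sketch of how the cost model applies this factor
/// (hypothetical variable name):
///   // A block predicated behind an if is assumed to run 1-in-2 iterations.
///   BlockCost /= getReciprocalPredBlockProb();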
397 static unsigned getReciprocalPredBlockProb() { return 2; }
398 
399 /// A helper function that adds a 'fast' flag to floating-point operations.
400 static Value *addFastMathFlag(Value *V) {
401   if (isa<FPMathOperator>(V))
402     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
403   return V;
404 }
405 
406 /// A helper function that returns an integer or floating-point constant with
407 /// value C.
408 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
409   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
410                            : ConstantFP::get(Ty, C);
411 }
412 
413 /// Returns "best known" trip count for the specified loop \p L as defined by
414 /// the following procedure:
415 ///   1) Returns exact trip count if it is known.
416 ///   2) Returns expected trip count according to profile data if any.
417 ///   3) Returns upper bound estimate if it is known.
418 ///   4) Returns None if all of the above failed.
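///
/// For illustration only, a hypothetical use mirroring how callers compare
/// the result against a trip-count threshold:
///   if (auto ExpectedTC = getSmallBestKnownTC(*SE, L))
///     if (*ExpectedTC < TinyTripCountVectorThreshold)
///       ; // treat this as a low-trip-count loop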
419 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
420   // Check if exact trip count is known.
421   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
422     return ExpectedTC;
423 
424   // Check if there is an expected trip count available from profile data.
425   if (LoopVectorizeWithBlockFrequency)
426     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
427       return EstimatedTC;
428 
429   // Check if upper bound estimate is known.
430   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
431     return ExpectedTC;
432 
433   return None;
434 }
435 
436 namespace llvm {
437 
438 /// InnerLoopVectorizer vectorizes loops which contain only one basic
439 /// block to a specified vectorization factor (VF).
440 /// This class performs the widening of scalars into vectors, or multiple
441 /// scalars. This class also implements the following features:
442 /// * It inserts an epilogue loop for handling loops that don't have iteration
443 ///   counts that are known to be a multiple of the vectorization factor.
444 /// * It handles the code generation for reduction variables.
445 /// * Scalarization (implementation using scalars) of un-vectorizable
446 ///   instructions.
447 /// InnerLoopVectorizer does not perform any vectorization-legality
448 /// checks, and relies on the caller to check for the different legality
449 /// aspects. The InnerLoopVectorizer relies on the
450 /// LoopVectorizationLegality class to provide information about the induction
451 /// and reduction variables that were found for a given vectorization factor.
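///
/// A rough sketch of the control flow generated around the new loop
/// (simplified; the exact set of check blocks depends on the loop):
///   - iteration-count and runtime-check blocks, any of which may bypass the
///     vector loop and branch directly to the scalar preheader;
///   - the vector preheader followed by the vector loop body;
///   - a middle block that either exits the loop or falls through to the
///     scalar preheader;
///   - the scalar preheader followed by the original loop, now acting as the
///     scalar epilogue.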
452 class InnerLoopVectorizer {
453 public:
454   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
455                       LoopInfo *LI, DominatorTree *DT,
456                       const TargetLibraryInfo *TLI,
457                       const TargetTransformInfo *TTI, AssumptionCache *AC,
458                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
459                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
460                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
461                       ProfileSummaryInfo *PSI)
462       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
463         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
464         Builder(PSE.getSE()->getContext()),
465         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
466         BFI(BFI), PSI(PSI) {
467     // Query this against the original loop and save it here because the profile
468     // of the original loop header may change as the transformation happens.
469     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
470         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
471   }
472 
473   virtual ~InnerLoopVectorizer() = default;
474 
475   /// Create a new empty loop that will contain vectorized instructions later
476   /// on, while the old loop will be used as the scalar remainder. Control flow
477   /// is generated around the vectorized (and scalar epilogue) loops consisting
478   /// of various checks and bypasses. Return the pre-header block of the new
479   /// loop.
480   /// In the case of epilogue vectorization, this function is overridden to
481   /// handle the more complex control flow around the loops.
482   virtual BasicBlock *createVectorizedLoopSkeleton();
483 
484   /// Widen a single instruction within the innermost loop.
485   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
486                         VPTransformState &State);
487 
488   /// Widen a single call instruction within the innermost loop.
489   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
490                             VPTransformState &State);
491 
492   /// Widen a single select instruction within the innermost loop.
493   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
494                               bool InvariantCond, VPTransformState &State);
495 
496   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
497   void fixVectorizedLoop(VPTransformState &State);
498 
499   // Return true if any runtime check is added.
500   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
501 
502   /// A type for vectorized values in the new loop. Each value from the
503   /// original loop, when vectorized, is represented by UF vector values in the
504   /// new unrolled loop, where UF is the unroll factor.
505   using VectorParts = SmallVector<Value *, 2>;
506 
507   /// Vectorize a single GetElementPtrInst based on information gathered and
508   /// decisions taken during planning.
509   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
510                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
511                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
512 
513   /// Vectorize a single PHINode in a block. This method handles the induction
514   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
515   /// arbitrary length vectors.
516   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
517                            Value *StartV, VPValue *Def,
518                            VPTransformState &State);
519 
520   /// A helper function to scalarize a single Instruction in the innermost loop.
521   /// Generates a sequence of scalar instances for each lane between \p MinLane
522   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
523   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
524   /// Instr's operands.
525   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
526                             const VPIteration &Instance, bool IfPredicateInstr,
527                             VPTransformState &State);
528 
529   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
530   /// is provided, the integer induction variable will first be truncated to
531   /// the corresponding type.
532   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
533                              VPValue *Def, VPValue *CastDef,
534                              VPTransformState &State);
535 
536   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
537   /// vector or scalar value on-demand if one is not yet available. When
538   /// vectorizing a loop, we visit the definition of an instruction before its
539   /// uses. When visiting the definition, we either vectorize or scalarize the
540   /// instruction, creating an entry for it in the corresponding map. (In some
541   /// cases, such as induction variables, we will create both vector and scalar
542   /// entries.) Then, as we encounter uses of the definition, we derive values
543   /// for each scalar or vector use unless such a value is already available.
544   /// For example, if we scalarize a definition and one of its uses is vector,
545   /// we build the required vector on-demand with an insertelement sequence
546   /// when visiting the use. Otherwise, if the use is scalar, we can use the
547   /// existing scalar definition.
548   ///
549   /// Return a value in the new loop corresponding to \p V from the original
550   /// loop at unroll index \p Part. If the value has already been vectorized,
551   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
552   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
553   /// a new vector value on-demand by inserting the scalar values into a vector
554   /// with an insertelement sequence. If the value has been neither vectorized
555   /// nor scalarized, it must be loop invariant, so we simply broadcast the
556   /// value into a vector.
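  ///
  /// For illustration only, a sketch (not the exact emitted IR) of the
  /// on-demand packing of a scalarized 32-bit definition at VF = 4:
  ///   %p0  = insertelement <4 x i32> undef, i32 %s0, i32 0
  ///   %p1  = insertelement <4 x i32> %p0,   i32 %s1, i32 1
  ///   %p2  = insertelement <4 x i32> %p1,   i32 %s2, i32 2
  ///   %vec = insertelement <4 x i32> %p2,   i32 %s3, i32 3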
557   Value *getOrCreateVectorValue(Value *V, unsigned Part);
558 
559   void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
560     VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
561   }
562 
563   void resetVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
564     VectorLoopValueMap.resetVectorValue(Scalar, Part, Vector);
565   }
566 
567   void setScalarValue(Value *Scalar, const VPIteration &Instance, Value *V) {
568     VectorLoopValueMap.setScalarValue(Scalar, Instance, V);
569   }
570 
571   /// Return a value in the new loop corresponding to \p V from the original
572   /// loop at unroll and vector indices \p Instance. If the value has been
573   /// vectorized but not scalarized, the necessary extractelement instruction
574   /// will be generated.
575   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
576 
577   /// Construct the vector value of a scalarized value \p V one lane at a time.
578   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
579 
580   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
581                                  VPTransformState &State);
582 
583   /// Try to vectorize interleaved access group \p Group with the base address
584   /// given in \p Addr, optionally masking the vector operations if \p
585   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
586   /// values in the vectorized loop.
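  ///
  /// For illustration only: a group of two loads with interleave factor 2
  /// (e.g. A[2*i] and A[2*i+1]) at VF = 4 is lowered to a single wide load of
  /// <8 x T> followed by two shufflevectors that de-interleave the members
  /// using the strided masks <0,2,4,6> and <1,3,5,7>.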
587   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
588                                 ArrayRef<VPValue *> VPDefs,
589                                 VPTransformState &State, VPValue *Addr,
590                                 ArrayRef<VPValue *> StoredValues,
591                                 VPValue *BlockInMask = nullptr);
592 
593   /// Vectorize Load and Store instructions with the base address given in \p
594   /// Addr, optionally masking the vector operations if \p BlockInMask is
595   /// non-null. Use \p State to translate given VPValues to IR values in the
596   /// vectorized loop.
597   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
598                                   VPValue *Def, VPValue *Addr,
599                                   VPValue *StoredValue, VPValue *BlockInMask);
600 
601   /// Set the debug location in the builder using the debug location in
602   /// the instruction.
603   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
604 
605   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
606   void fixNonInductionPHIs(VPTransformState &State);
607 
608   /// Create a broadcast instruction. This method generates a broadcast
609   /// instruction (shuffle) for loop invariant values and for the induction
610   /// value. If this is the induction variable then we extend it to N, N+1, ...
611   /// this is needed because each iteration in the loop corresponds to a SIMD
612   /// element.
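  ///
  /// For illustration only, the kind of IR a broadcast of a loop-invariant
  /// value %v produces at VF = 4 (a sketch, not a guaranteed form):
  ///   %ins   = insertelement <4 x i32> undef, i32 %v, i32 0
  ///   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
  ///                          <4 x i32> zeroinitializer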
613   virtual Value *getBroadcastInstrs(Value *V);
614 
615 protected:
616   friend class LoopVectorizationPlanner;
617 
618   /// A small list of PHINodes.
619   using PhiVector = SmallVector<PHINode *, 4>;
620 
621   /// A type for scalarized values in the new loop. Each value from the
622   /// original loop, when scalarized, is represented by UF x VF scalar values
623   /// in the new unrolled loop, where UF is the unroll factor and VF is the
624   /// vectorization factor.
625   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
626 
627   /// Set up the values of the IVs correctly when exiting the vector loop.
628   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
629                     Value *CountRoundDown, Value *EndValue,
630                     BasicBlock *MiddleBlock);
631 
632   /// Create a new induction variable inside L.
633   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
634                                    Value *Step, Instruction *DL);
635 
636   /// Handle all cross-iteration phis in the header.
637   void fixCrossIterationPHIs(VPTransformState &State);
638 
639   /// Fix a first-order recurrence. This is the second phase of vectorizing
640   /// this phi node.
641   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
642 
643   /// Fix a reduction cross-iteration phi. This is the second phase of
644   /// vectorizing this phi node.
645   void fixReduction(PHINode *Phi, VPTransformState &State);
646 
647   /// Clear NSW/NUW flags from reduction instructions if necessary.
648   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
649 
650   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
651   /// means we need to add the appropriate incoming value from the middle
652   /// block as exiting edges from the scalar epilogue loop (if present) are
653   /// already in place, and we exit the vector loop exclusively to the middle
654   /// block.
655   void fixLCSSAPHIs(VPTransformState &State);
656 
657   /// Iteratively sink the scalarized operands of a predicated instruction into
658   /// the block that was created for it.
659   void sinkScalarOperands(Instruction *PredInst);
660 
661   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
662   /// represented as.
663   void truncateToMinimalBitwidths();
664 
665   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
666   /// to each vector element of Val. The sequence starts at \p StartIdx.
667   /// \p Opcode is relevant for FP induction variables.
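  ///
  /// For illustration only, with \p StartIdx = 0, step %s and VF = 4, a splat
  /// of %base becomes (a sketch):
  ///   <%base, %base + %s, %base + 2 * %s, %base + 3 * %s>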
668   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
669                                Instruction::BinaryOps Opcode =
670                                Instruction::BinaryOpsEnd);
671 
672   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
673   /// variable on which to base the steps, \p Step is the size of the step, and
674   /// \p EntryVal is the value from the original loop that maps to the steps.
675   /// Note that \p EntryVal doesn't have to be an induction variable - it
676   /// can also be a truncate instruction.
677   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
678                         const InductionDescriptor &ID, VPValue *Def,
679                         VPValue *CastDef, VPTransformState &State);
680 
681   /// Create a vector induction phi node based on an existing scalar one. \p
682   /// EntryVal is the value from the original loop that maps to the vector phi
683   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
684   /// truncate instruction, instead of widening the original IV, we widen a
685   /// version of the IV truncated to \p EntryVal's type.
686   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
687                                        Value *Step, Value *Start,
688                                        Instruction *EntryVal, VPValue *Def,
689                                        VPValue *CastDef,
690                                        VPTransformState &State);
691 
692   /// Returns true if an instruction \p I should be scalarized instead of
693   /// vectorized for the chosen vectorization factor.
694   bool shouldScalarizeInstruction(Instruction *I) const;
695 
696   /// Returns true if we should generate a scalar version of \p IV.
697   bool needsScalarInduction(Instruction *IV) const;
698 
699   /// If there is a cast involved in the induction variable \p ID, which should
700   /// be ignored in the vectorized loop body, this function records the
701   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
702   /// cast. We had already proved that the casted Phi is equal to the uncasted
703   /// Phi in the vectorized loop (under a runtime guard), and therefore
704   /// there is no need to vectorize the cast - the same value can be used in the
705   /// vector loop for both the Phi and the cast.
706   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
707   /// otherwise, \p VectorLoopValue is a widened/vectorized value.
708   ///
709   /// \p EntryVal is the value from the original loop that maps to the vector
710   /// phi node and is used to distinguish which IV is currently being
711   /// processed - the original one (if \p EntryVal is a phi corresponding to the
712   /// original IV) or the "newly-created" one based on the proof mentioned above
713   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
714   /// latter case \p EntryVal is a TruncInst and we must not record anything for
715   /// that IV, but it's error-prone to expect callers of this routine to care
716   /// about that, hence this explicit parameter.
717   void recordVectorLoopValueForInductionCast(
718       const InductionDescriptor &ID, const Instruction *EntryVal,
719       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
720       unsigned Part, unsigned Lane = UINT_MAX);
721 
722   /// Generate a shuffle sequence that will reverse the vector Vec.
723   virtual Value *reverseVector(Value *Vec);
724 
725   /// Returns (and creates if needed) the original loop trip count.
726   Value *getOrCreateTripCount(Loop *NewLoop);
727 
728   /// Returns (and creates if needed) the trip count of the widened loop.
729   Value *getOrCreateVectorTripCount(Loop *NewLoop);
730 
731   /// Returns a bitcasted value to the requested vector type.
732   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
733   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
734                                 const DataLayout &DL);
735 
736   /// Emit a bypass check to see if the vector trip count is zero, including if
737   /// it overflows.
738   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
739 
740   /// Emit a bypass check to see if all of the SCEV assumptions we've
741   /// had to make are correct.
742   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
743 
744   /// Emit bypass checks to check any memory assumptions we may have made.
745   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
746 
747   /// Compute the transformed value of Index at offset StartValue using step
748   /// StepValue.
749   /// For integer induction, returns StartValue + Index * StepValue.
750   /// For pointer induction, returns StartValue[Index * StepValue].
751   /// FIXME: The newly created binary instructions should contain nsw/nuw
752   /// flags, which can be found from the original scalar operations.
753   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
754                               const DataLayout &DL,
755                               const InductionDescriptor &ID) const;
756 
757   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
758   /// vector loop preheader, middle block and scalar preheader. Also
759   /// allocate a loop object for the new vector loop and return it.
760   Loop *createVectorLoopSkeleton(StringRef Prefix);
761 
762   /// Create new phi nodes for the induction variables to resume the iteration count
763   /// in the scalar epilogue, from where the vectorized loop left off (given by
764   /// \p VectorTripCount).
765   /// In cases where the loop skeleton is more complicated (e.g. epilogue
766   /// vectorization) and the resume values can come from an additional bypass
767   /// block, the \p AdditionalBypass pair provides information about the bypass
768   /// block and the end value on the edge from bypass to this loop.
769   void createInductionResumeValues(
770       Loop *L, Value *VectorTripCount,
771       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
772 
773   /// Complete the loop skeleton by adding debug MDs, creating appropriate
774   /// conditional branches in the middle block, preparing the builder and
775   /// running the verifier. Take the vector loop \p L as an argument, and return
776   /// the preheader of the completed vector loop.
777   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
778 
779   /// Add additional metadata to \p To that was not present on \p Orig.
780   ///
781   /// Currently this is used to add the noalias annotations based on the
782   /// inserted memchecks.  Use this for instructions that are *cloned* into the
783   /// vector loop.
784   void addNewMetadata(Instruction *To, const Instruction *Orig);
785 
786   /// Add metadata from one instruction to another.
787   ///
788   /// This includes both the original MDs from \p From and additional ones (\see
789   /// addNewMetadata).  Use this for *newly created* instructions in the vector
790   /// loop.
791   void addMetadata(Instruction *To, Instruction *From);
792 
793   /// Similar to the previous function but it adds the metadata to a
794   /// vector of instructions.
795   void addMetadata(ArrayRef<Value *> To, Instruction *From);
796 
797   /// Allow subclasses to override and print debug traces before/after vplan
798   /// execution, when trace information is requested.
799   virtual void printDebugTracesAtStart() {}
800   virtual void printDebugTracesAtEnd() {}
801 
802   /// The original loop.
803   Loop *OrigLoop;
804 
805   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
806   /// dynamic knowledge to simplify SCEV expressions and converts them to a
807   /// more usable form.
808   PredicatedScalarEvolution &PSE;
809 
810   /// Loop Info.
811   LoopInfo *LI;
812 
813   /// Dominator Tree.
814   DominatorTree *DT;
815 
816   /// Alias Analysis.
817   AAResults *AA;
818 
819   /// Target Library Info.
820   const TargetLibraryInfo *TLI;
821 
822   /// Target Transform Info.
823   const TargetTransformInfo *TTI;
824 
825   /// Assumption Cache.
826   AssumptionCache *AC;
827 
828   /// Interface to emit optimization remarks.
829   OptimizationRemarkEmitter *ORE;
830 
831   /// LoopVersioning.  It's only set up (non-null) if memchecks were
832   /// used.
833   ///
834   /// This is currently only used to add no-alias metadata based on the
835   /// memchecks.  The actual versioning is performed manually.
836   std::unique_ptr<LoopVersioning> LVer;
837 
838   /// The vectorization SIMD factor to use. Each vector will have this many
839   /// vector elements.
840   ElementCount VF;
841 
842   /// The vectorization unroll factor to use. Each scalar is vectorized to this
843   /// many different vector instructions.
844   unsigned UF;
845 
846   /// The builder that we use
847   IRBuilder<> Builder;
848 
849   // --- Vectorization state ---
850 
851   /// The vector-loop preheader.
852   BasicBlock *LoopVectorPreHeader;
853 
854   /// The scalar-loop preheader.
855   BasicBlock *LoopScalarPreHeader;
856 
857   /// Middle Block between the vector and the scalar.
858   BasicBlock *LoopMiddleBlock;
859 
860   /// The (unique) ExitBlock of the scalar loop.  Note that
861   /// there can be multiple exiting edges reaching this block.
862   BasicBlock *LoopExitBlock;
863 
864   /// The vector loop body.
865   BasicBlock *LoopVectorBody;
866 
867   /// The scalar loop body.
868   BasicBlock *LoopScalarBody;
869 
870   /// A list of all bypass blocks. The first block is the entry of the loop.
871   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
872 
873   /// The new Induction variable which was added to the new block.
874   PHINode *Induction = nullptr;
875 
876   /// The induction variable of the old basic block.
877   PHINode *OldInduction = nullptr;
878 
879   /// Maps values from the original loop to their corresponding values in the
880   /// vectorized loop. A key value can map to either vector values, scalar
881   /// values or both kinds of values, depending on whether the key was
882   /// vectorized and scalarized.
883   VectorizerValueMap VectorLoopValueMap;
884 
885   /// Store instructions that were predicated.
886   SmallVector<Instruction *, 4> PredicatedInstructions;
887 
888   /// Trip count of the original loop.
889   Value *TripCount = nullptr;
890 
891   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
892   Value *VectorTripCount = nullptr;
893 
894   /// The legality analysis.
895   LoopVectorizationLegality *Legal;
896 
897   /// The profitability analysis.
898   LoopVectorizationCostModel *Cost;
899 
900   // Record whether runtime checks are added.
901   bool AddedSafetyChecks = false;
902 
903   // Holds the end values for each induction variable. We save the end values
904   // so we can later fix-up the external users of the induction variables.
905   DenseMap<PHINode *, Value *> IVEndValues;
906 
907   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
908   // fixed up at the end of vector code generation.
909   SmallVector<PHINode *, 8> OrigPHIsToFix;
910 
911   /// BFI and PSI are used to check for profile guided size optimizations.
912   BlockFrequencyInfo *BFI;
913   ProfileSummaryInfo *PSI;
914 
915   // Whether this loop should be optimized for size based on profile-guided size
916   // optimizations.
917   bool OptForSizeBasedOnProfile;
918 };
919 
920 class InnerLoopUnroller : public InnerLoopVectorizer {
921 public:
922   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
923                     LoopInfo *LI, DominatorTree *DT,
924                     const TargetLibraryInfo *TLI,
925                     const TargetTransformInfo *TTI, AssumptionCache *AC,
926                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
927                     LoopVectorizationLegality *LVL,
928                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
929                     ProfileSummaryInfo *PSI)
930       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
931                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
932                             BFI, PSI) {}
933 
934 private:
935   Value *getBroadcastInstrs(Value *V) override;
936   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
937                        Instruction::BinaryOps Opcode =
938                        Instruction::BinaryOpsEnd) override;
939   Value *reverseVector(Value *Vec) override;
940 };
941 
942 /// Encapsulate information regarding vectorization of a loop and its epilogue.
943 /// This information is meant to be updated and used across two stages of
944 /// epilogue vectorization.
945 struct EpilogueLoopVectorizationInfo {
946   ElementCount MainLoopVF = ElementCount::getFixed(0);
947   unsigned MainLoopUF = 0;
948   ElementCount EpilogueVF = ElementCount::getFixed(0);
949   unsigned EpilogueUF = 0;
950   BasicBlock *MainLoopIterationCountCheck = nullptr;
951   BasicBlock *EpilogueIterationCountCheck = nullptr;
952   BasicBlock *SCEVSafetyCheck = nullptr;
953   BasicBlock *MemSafetyCheck = nullptr;
954   Value *TripCount = nullptr;
955   Value *VectorTripCount = nullptr;
956 
957   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
958                                 unsigned EUF)
959       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
960         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
961     assert(EUF == 1 &&
962            "A high UF for the epilogue loop is likely not beneficial.");
963   }
964 };
965 
966 /// An extension of the inner loop vectorizer that creates a skeleton for a
967 /// vectorized loop that has its epilogue (residual) also vectorized.
968 /// The idea is to run the vplan on a given loop twice, first to set up the
969 /// skeleton and vectorize the main loop, and second to complete the skeleton
970 /// from the first step and vectorize the epilogue.  This is achieved by
971 /// deriving two concrete strategy classes from this base class and invoking
972 /// them in succession from the loop vectorizer planner.
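///
/// A rough sketch of the resulting layout (simplified, omitting the various
/// bypass and runtime-check blocks):
///   1. iteration count check for the main vector loop;
///   2. the main vector loop (MainLoopVF, MainLoopUF);
///   3. iteration count check for the vector epilogue;
///   4. the epilogue vector loop (EpilogueVF, EpilogueUF);
///   5. the scalar remainder loop.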
973 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
974 public:
975   InnerLoopAndEpilogueVectorizer(
976       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
977       DominatorTree *DT, const TargetLibraryInfo *TLI,
978       const TargetTransformInfo *TTI, AssumptionCache *AC,
979       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
980       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
981       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
982       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
983                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
984         EPI(EPI) {}
985 
986   // Override this function to handle the more complex control flow around the
987   // three loops.
988   BasicBlock *createVectorizedLoopSkeleton() final override {
989     return createEpilogueVectorizedLoopSkeleton();
990   }
991 
992   /// The interface for creating a vectorized skeleton using one of two
993   /// different strategies, each corresponding to one execution of the vplan
994   /// as described above.
995   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
996 
997   /// Holds and updates state information required to vectorize the main loop
998   /// and its epilogue in two separate passes. This setup helps us avoid
999   /// regenerating and recomputing runtime safety checks. It also helps us to
1000   /// shorten the iteration-count-check path length for the cases where the
1001   /// iteration count of the loop is so small that the main vector loop is
1002   /// completely skipped.
1003   EpilogueLoopVectorizationInfo &EPI;
1004 };
1005 
1006 /// A specialized derived class of inner loop vectorizer that performs
1007 /// vectorization of *main* loops in the process of vectorizing loops and their
1008 /// epilogues.
1009 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
1010 public:
1011   EpilogueVectorizerMainLoop(
1012       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1013       DominatorTree *DT, const TargetLibraryInfo *TLI,
1014       const TargetTransformInfo *TTI, AssumptionCache *AC,
1015       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1016       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1017       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
1018       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1019                                        EPI, LVL, CM, BFI, PSI) {}
1020   /// Implements the interface for creating a vectorized skeleton using the
1021   /// *main loop* strategy (i.e. the first pass of vplan execution).
1022   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1023 
1024 protected:
1025   /// Emits an iteration count bypass check once for the main loop (when \p
1026   /// ForEpilogue is false) and once for the epilogue loop (when \p
1027   /// ForEpilogue is true).
1028   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
1029                                              bool ForEpilogue);
1030   void printDebugTracesAtStart() override;
1031   void printDebugTracesAtEnd() override;
1032 };
1033 
1034 // A specialized derived class of inner loop vectorizer that performs
1035 // vectorization of *epilogue* loops in the process of vectorizing loops and
1036 // their epilogues.
1037 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
1038 public:
1039   EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
1040                     LoopInfo *LI, DominatorTree *DT,
1041                     const TargetLibraryInfo *TLI,
1042                     const TargetTransformInfo *TTI, AssumptionCache *AC,
1043                     OptimizationRemarkEmitter *ORE,
1044                     EpilogueLoopVectorizationInfo &EPI,
1045                     LoopVectorizationLegality *LVL,
1046                     llvm::LoopVectorizationCostModel *CM,
1047                     BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
1048       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1049                                        EPI, LVL, CM, BFI, PSI) {}
1050   /// Implements the interface for creating a vectorized skeleton using the
1051   /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
1052   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1053 
1054 protected:
1055   /// Emits an iteration count bypass check after the main vector loop has
1056   /// finished to see if there are any iterations left to execute by either
1057   /// the vector epilogue or the scalar epilogue.
1058   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1059                                                       BasicBlock *Bypass,
1060                                                       BasicBlock *Insert);
1061   void printDebugTracesAtStart() override;
1062   void printDebugTracesAtEnd() override;
1063 };
1064 } // end namespace llvm
1065 
1066 /// Look for a meaningful debug location on the instruction or its
1067 /// operands.
1068 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1069   if (!I)
1070     return I;
1071 
1072   DebugLoc Empty;
1073   if (I->getDebugLoc() != Empty)
1074     return I;
1075 
1076   for (Use &Op : I->operands()) {
1077     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1078       if (OpInst->getDebugLoc() != Empty)
1079         return OpInst;
1080   }
1081 
1082   return I;
1083 }
1084 
1085 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1086   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1087     const DILocation *DIL = Inst->getDebugLoc();
1088     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1089         !isa<DbgInfoIntrinsic>(Inst)) {
1090       assert(!VF.isScalable() && "scalable vectors not yet supported.");
1091       auto NewDIL =
1092           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1093       if (NewDIL)
1094         B.SetCurrentDebugLocation(NewDIL.getValue());
1095       else
1096         LLVM_DEBUG(dbgs()
1097                    << "Failed to create new discriminator: "
1098                    << DIL->getFilename() << " Line: " << DIL->getLine());
1099     }
1100     else
1101       B.SetCurrentDebugLocation(DIL);
1102   } else
1103     B.SetCurrentDebugLocation(DebugLoc());
1104 }
1105 
1106 /// Write a record \p DebugMsg about vectorization failure to the debug
1107 /// output stream. If \p I is passed, it is an instruction that prevents
1108 /// vectorization.
1109 #ifndef NDEBUG
1110 static void debugVectorizationFailure(const StringRef DebugMsg,
1111     Instruction *I) {
1112   dbgs() << "LV: Not vectorizing: " << DebugMsg;
1113   if (I != nullptr)
1114     dbgs() << " " << *I;
1115   else
1116     dbgs() << '.';
1117   dbgs() << '\n';
1118 }
1119 #endif
1120 
1121 /// Create an analysis remark that explains why vectorization failed
1122 ///
1123 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1124 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1125 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1126 /// the location of the remark.  \return the remark object that can be
1127 /// streamed to.
1128 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1129     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1130   Value *CodeRegion = TheLoop->getHeader();
1131   DebugLoc DL = TheLoop->getStartLoc();
1132 
1133   if (I) {
1134     CodeRegion = I->getParent();
1135     // If there is no debug location attached to the instruction, fall back to
1136     // using the loop's.
1137     if (I->getDebugLoc())
1138       DL = I->getDebugLoc();
1139   }
1140 
1141   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
1142   R << "loop not vectorized: ";
1143   return R;
1144 }
1145 
1146 /// Return a value for Step multiplied by VF.
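///
/// For illustration only: with Step == 1 and a fixed VF of 4 this returns the
/// constant 4, while with a scalable VF of 4 it returns a runtime quantity
/// equivalent to vscale * 4 (built via IRBuilder::CreateVScale).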
1147 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1148   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1149   Constant *StepVal = ConstantInt::get(
1150       Step->getType(),
1151       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1152   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1153 }
1154 
1155 namespace llvm {
1156 
1157 void reportVectorizationFailure(const StringRef DebugMsg,
1158     const StringRef OREMsg, const StringRef ORETag,
1159     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1160   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1161   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1162   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1163                 ORETag, TheLoop, I) << OREMsg);
1164 }
1165 
1166 } // end namespace llvm
1167 
1168 #ifndef NDEBUG
1169 /// \return string containing a file name and a line # for the given loop.
1170 static std::string getDebugLocString(const Loop *L) {
1171   std::string Result;
1172   if (L) {
1173     raw_string_ostream OS(Result);
1174     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1175       LoopDbgLoc.print(OS);
1176     else
1177       // Just print the module name.
1178       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1179     OS.flush();
1180   }
1181   return Result;
1182 }
1183 #endif
1184 
1185 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1186                                          const Instruction *Orig) {
1187   // If the loop was versioned with memchecks, add the corresponding no-alias
1188   // metadata.
1189   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1190     LVer->annotateInstWithNoAlias(To, Orig);
1191 }
1192 
1193 void InnerLoopVectorizer::addMetadata(Instruction *To,
1194                                       Instruction *From) {
1195   propagateMetadata(To, From);
1196   addNewMetadata(To, From);
1197 }
1198 
1199 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1200                                       Instruction *From) {
1201   for (Value *V : To) {
1202     if (Instruction *I = dyn_cast<Instruction>(V))
1203       addMetadata(I, From);
1204   }
1205 }
1206 
1207 namespace llvm {
1208 
// Options, decided by the loop vectorization cost model, for how the scalar
// epilogue loop should be lowered.
1211 enum ScalarEpilogueLowering {
1212 
1213   // The default: allowing scalar epilogues.
1214   CM_ScalarEpilogueAllowed,
1215 
1216   // Vectorization with OptForSize: don't allow epilogues.
1217   CM_ScalarEpilogueNotAllowedOptSize,
1218 
  // A special case of vectorization with OptForSize: loops with a very small
1220   // trip count are considered for vectorization under OptForSize, thereby
1221   // making sure the cost of their loop body is dominant, free of runtime
1222   // guards and scalar iteration overheads.
1223   CM_ScalarEpilogueNotAllowedLowTripLoop,
1224 
1225   // Loop hint predicate indicating an epilogue is undesired.
1226   CM_ScalarEpilogueNotNeededUsePredicate,
1227 
  // Directive indicating we must either tail-fold or not vectorize.
1229   CM_ScalarEpilogueNotAllowedUsePredicate
1230 };
1231 
1232 /// LoopVectorizationCostModel - estimates the expected speedups due to
1233 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1236 /// expected speedup/slowdowns due to the supported instruction set. We use the
1237 /// TargetTransformInfo to query the different backends for the cost of
1238 /// different operations.
1239 class LoopVectorizationCostModel {
1240 public:
1241   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1242                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1243                              LoopVectorizationLegality *Legal,
1244                              const TargetTransformInfo &TTI,
1245                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1246                              AssumptionCache *AC,
1247                              OptimizationRemarkEmitter *ORE, const Function *F,
1248                              const LoopVectorizeHints *Hints,
1249                              InterleavedAccessInfo &IAI)
1250       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1251         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1252         Hints(Hints), InterleaveInfo(IAI) {}
1253 
1254   /// \return An upper bound for the vectorization factor, or None if
1255   /// vectorization and interleaving should be avoided up front.
1256   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1257 
1258   /// \return True if runtime checks are required for vectorization, and false
1259   /// otherwise.
1260   bool runtimeChecksRequired();
1261 
1262   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
1264   /// then this vectorization factor will be selected if vectorization is
1265   /// possible.
1266   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1267   VectorizationFactor
1268   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1269                                     const LoopVectorizationPlanner &LVP);
1270 
1271   /// Setup cost-based decisions for user vectorization factor.
1272   void selectUserVectorizationFactor(ElementCount UserVF) {
1273     collectUniformsAndScalars(UserVF);
1274     collectInstsToScalarize(UserVF);
1275   }
1276 
1277   /// \return The size (in bits) of the smallest and widest types in the code
1278   /// that needs to be vectorized. We ignore values that remain scalar such as
1279   /// 64 bit loop indices.
1280   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1281 
1282   /// \return The desired interleave count.
1283   /// If interleave count has been specified by metadata it will be returned.
1284   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1285   /// are the selected vectorization factor and the cost of the selected VF.
1286   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1287 
  /// A memory access instruction may be vectorized in more than one way, and
  /// its form after vectorization depends on cost. This function takes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. This decision map is used for building the lists of loop-uniform and
  /// loop-scalar instructions. The calculated cost is saved with the widening
  /// decision in order to avoid redundant calculations.
1295   void setCostBasedWideningDecision(ElementCount VF);
1296 
1297   /// A struct that represents some properties of the register usage
1298   /// of a loop.
1299   struct RegisterUsage {
1300     /// Holds the number of loop invariant values that are used in the loop.
1301     /// The key is ClassID of target-provided register class.
1302     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1303     /// Holds the maximum number of concurrent live intervals in the loop.
1304     /// The key is ClassID of target-provided register class.
1305     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1306   };
1307 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1310   SmallVector<RegisterUsage, 8>
1311   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1312 
1313   /// Collect values we want to ignore in the cost model.
1314   void collectValuesToIgnore();
1315 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1318   void collectInLoopReductions();
1319 
1320   /// \returns The smallest bitwidth each instruction can be represented with.
1321   /// The vector equivalents of these instructions should be truncated to this
1322   /// type.
1323   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1324     return MinBWs;
1325   }
1326 
1327   /// \returns True if it is more profitable to scalarize instruction \p I for
1328   /// vectorization factor \p VF.
1329   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1330     assert(VF.isVector() &&
1331            "Profitable to scalarize relevant only for VF > 1.");
1332 
1333     // Cost model is not run in the VPlan-native path - return conservative
1334     // result until this changes.
1335     if (EnableVPlanNativePath)
1336       return false;
1337 
1338     auto Scalars = InstsToScalarize.find(VF);
1339     assert(Scalars != InstsToScalarize.end() &&
1340            "VF not yet analyzed for scalarization profitability");
1341     return Scalars->second.find(I) != Scalars->second.end();
1342   }
1343 
1344   /// Returns true if \p I is known to be uniform after vectorization.
1345   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1346     if (VF.isScalar())
1347       return true;
1348 
1349     // Cost model is not run in the VPlan-native path - return conservative
1350     // result until this changes.
1351     if (EnableVPlanNativePath)
1352       return false;
1353 
1354     auto UniformsPerVF = Uniforms.find(VF);
1355     assert(UniformsPerVF != Uniforms.end() &&
1356            "VF not yet analyzed for uniformity");
1357     return UniformsPerVF->second.count(I);
1358   }
1359 
1360   /// Returns true if \p I is known to be scalar after vectorization.
1361   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1362     if (VF.isScalar())
1363       return true;
1364 
1365     // Cost model is not run in the VPlan-native path - return conservative
1366     // result until this changes.
1367     if (EnableVPlanNativePath)
1368       return false;
1369 
1370     auto ScalarsPerVF = Scalars.find(VF);
1371     assert(ScalarsPerVF != Scalars.end() &&
1372            "Scalar values are not calculated for VF");
1373     return ScalarsPerVF->second.count(I);
1374   }
1375 
1376   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1377   /// for vectorization factor \p VF.
1378   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1379     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1380            !isProfitableToScalarize(I, VF) &&
1381            !isScalarAfterVectorization(I, VF);
1382   }
1383 
1384   /// Decision that was taken during cost calculation for memory instruction.
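  /// For example (illustrative), a unit-stride consecutive access is typically
  /// recorded as CM_Widen, the same access with stride -1 as CM_Widen_Reverse,
  /// a member of an interleave group as CM_Interleave, and a non-consecutive
  /// access as CM_GatherScatter or CM_Scalarize, whichever the cost model
  /// finds cheaper.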
1385   enum InstWidening {
1386     CM_Unknown,
1387     CM_Widen,         // For consecutive accesses with stride +1.
1388     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1389     CM_Interleave,
1390     CM_GatherScatter,
1391     CM_Scalarize
1392   };
1393 
1394   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1395   /// instruction \p I and vector width \p VF.
1396   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1397                            InstructionCost Cost) {
1398     assert(VF.isVector() && "Expected VF >=2");
1399     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1400   }
1401 
1402   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1403   /// interleaving group \p Grp and vector width \p VF.
1404   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1405                            ElementCount VF, InstWidening W,
1406                            InstructionCost Cost) {
1407     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group, but
    // assign the cost to one instruction only.
1410     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1411       if (auto *I = Grp->getMember(i)) {
1412         if (Grp->getInsertPos() == I)
1413           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1414         else
1415           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1416       }
1417     }
1418   }
1419 
1420   /// Return the cost model decision for the given instruction \p I and vector
1421   /// width \p VF. Return CM_Unknown if this instruction did not pass
1422   /// through the cost modeling.
1423   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1424     assert(VF.isVector() && "Expected VF to be a vector VF");
1425     // Cost model is not run in the VPlan-native path - return conservative
1426     // result until this changes.
1427     if (EnableVPlanNativePath)
1428       return CM_GatherScatter;
1429 
1430     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1431     auto Itr = WideningDecisions.find(InstOnVF);
1432     if (Itr == WideningDecisions.end())
1433       return CM_Unknown;
1434     return Itr->second.first;
1435   }
1436 
1437   /// Return the vectorization cost for the given instruction \p I and vector
1438   /// width \p VF.
1439   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1440     assert(VF.isVector() && "Expected VF >=2");
1441     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1442     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1443            "The cost is not calculated");
1444     return WideningDecisions[InstOnVF].second;
1445   }
1446 
1447   /// Return True if instruction \p I is an optimizable truncate whose operand
1448   /// is an induction variable. Such a truncate will be removed by adding a new
1449   /// induction variable with the destination type.
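  /// For example (illustrative), a 'trunc i64 %iv to i32' of an i64 induction
  /// %iv can be replaced by a new i32 induction variable, provided the
  /// truncate is not already free for the target.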
1450   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1451     // If the instruction is not a truncate, return false.
1452     auto *Trunc = dyn_cast<TruncInst>(I);
1453     if (!Trunc)
1454       return false;
1455 
1456     // Get the source and destination types of the truncate.
1457     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1458     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1459 
1460     // If the truncate is free for the given types, return false. Replacing a
1461     // free truncate with an induction variable would add an induction variable
1462     // update instruction to each iteration of the loop. We exclude from this
1463     // check the primary induction variable since it will need an update
1464     // instruction regardless.
1465     Value *Op = Trunc->getOperand(0);
1466     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1467       return false;
1468 
1469     // If the truncated value is not an induction variable, return false.
1470     return Legal->isInductionPhi(Op);
1471   }
1472 
1473   /// Collects the instructions to scalarize for each predicated instruction in
1474   /// the loop.
1475   void collectInstsToScalarize(ElementCount VF);
1476 
1477   /// Collect Uniform and Scalar values for the given \p VF.
1478   /// The sets depend on CM decision for Load/Store instructions
1479   /// that may be vectorized as interleave, gather-scatter or scalarized.
1480   void collectUniformsAndScalars(ElementCount VF) {
1481     // Do the analysis once.
1482     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1483       return;
1484     setCostBasedWideningDecision(VF);
1485     collectLoopUniforms(VF);
1486     collectLoopScalars(VF);
1487   }
1488 
1489   /// Returns true if the target machine supports masked store operation
1490   /// for the given \p DataType and kind of access to \p Ptr.
1491   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1492     return Legal->isConsecutivePtr(Ptr) &&
1493            TTI.isLegalMaskedStore(DataType, Alignment);
1494   }
1495 
1496   /// Returns true if the target machine supports masked load operation
1497   /// for the given \p DataType and kind of access to \p Ptr.
1498   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1499     return Legal->isConsecutivePtr(Ptr) &&
1500            TTI.isLegalMaskedLoad(DataType, Alignment);
1501   }
1502 
1503   /// Returns true if the target machine supports masked scatter operation
1504   /// for the given \p DataType.
1505   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1506     return TTI.isLegalMaskedScatter(DataType, Alignment);
1507   }
1508 
1509   /// Returns true if the target machine supports masked gather operation
1510   /// for the given \p DataType.
1511   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1512     return TTI.isLegalMaskedGather(DataType, Alignment);
1513   }
1514 
1515   /// Returns true if the target machine can represent \p V as a masked gather
1516   /// or scatter operation.
1517   bool isLegalGatherOrScatter(Value *V) {
1518     bool LI = isa<LoadInst>(V);
1519     bool SI = isa<StoreInst>(V);
1520     if (!LI && !SI)
1521       return false;
1522     auto *Ty = getMemInstValueType(V);
1523     Align Align = getLoadStoreAlignment(V);
1524     return (LI && isLegalMaskedGather(Ty, Align)) ||
1525            (SI && isLegalMaskedScatter(Ty, Align));
1526   }
1527 
1528   /// Returns true if the target machine supports all of the reduction
1529   /// variables found for the given VF.
1530   bool canVectorizeReductions(ElementCount VF) {
1531     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1532       RecurrenceDescriptor RdxDesc = Reduction.second;
1533       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1534     }));
1535   }
1536 
1537   /// Returns true if \p I is an instruction that will be scalarized with
1538   /// predication. Such instructions include conditional stores and
1539   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
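  /// For example (illustrative), a store executed only under a condition, or a
  /// udiv whose divisor is not known to be non-zero, falls into this category.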
1542   bool isScalarWithPredication(Instruction *I,
1543                                ElementCount VF = ElementCount::getFixed(1));
1544 
1545   // Returns true if \p I is an instruction that will be predicated either
1546   // through scalar predication or masked load/store or masked gather/scatter.
1547   // Superset of instructions that return true for isScalarWithPredication.
1548   bool isPredicatedInst(Instruction *I) {
1549     if (!blockNeedsPredication(I->getParent()))
1550       return false;
1551     // Loads and stores that need some form of masked operation are predicated
1552     // instructions.
1553     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1554       return Legal->isMaskRequired(I);
1555     return isScalarWithPredication(I);
1556   }
1557 
1558   /// Returns true if \p I is a memory instruction with consecutive memory
1559   /// access that can be widened.
1560   bool
1561   memoryInstructionCanBeWidened(Instruction *I,
1562                                 ElementCount VF = ElementCount::getFixed(1));
1563 
1564   /// Returns true if \p I is a memory instruction in an interleaved-group
1565   /// of memory accesses that can be vectorized with wide vector loads/stores
1566   /// and shuffles.
1567   bool
1568   interleavedAccessCanBeWidened(Instruction *I,
1569                                 ElementCount VF = ElementCount::getFixed(1));
1570 
1571   /// Check if \p Instr belongs to any interleaved access group.
1572   bool isAccessInterleaved(Instruction *Instr) {
1573     return InterleaveInfo.isInterleaved(Instr);
1574   }
1575 
1576   /// Get the interleaved access group that \p Instr belongs to.
1577   const InterleaveGroup<Instruction> *
1578   getInterleavedAccessGroup(Instruction *Instr) {
1579     return InterleaveInfo.getInterleaveGroup(Instr);
1580   }
1581 
1582   /// Returns true if we're required to use a scalar epilogue for at least
1583   /// the final iteration of the original loop.
1584   bool requiresScalarEpilogue() const {
1585     if (!isScalarEpilogueAllowed())
1586       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1589     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1590       return true;
1591     return InterleaveInfo.requiresScalarEpilogue();
1592   }
1593 
  /// Returns true if a scalar epilogue is allowed, i.e. not disallowed due to
  /// optsize or a loop hint annotation.
1596   bool isScalarEpilogueAllowed() const {
1597     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1598   }
1599 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1601   bool foldTailByMasking() const { return FoldTailByMasking; }
1602 
1603   bool blockNeedsPredication(BasicBlock *BB) {
1604     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1605   }
1606 
1607   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1608   /// nodes to the chain of instructions representing the reductions. Uses a
1609   /// MapVector to ensure deterministic iteration order.
1610   using ReductionChainMap =
1611       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1612 
1613   /// Return the chain of instructions representing an inloop reduction.
1614   const ReductionChainMap &getInLoopReductionChains() const {
1615     return InLoopReductionChains;
1616   }
1617 
1618   /// Returns true if the Phi is part of an inloop reduction.
1619   bool isInLoopReduction(PHINode *Phi) const {
1620     return InLoopReductionChains.count(Phi);
1621   }
1622 
1623   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1624   /// with factor VF.  Return the cost of the instruction, including
1625   /// scalarization overhead if it's needed.
1626   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1627 
1628   /// Estimate cost of a call instruction CI if it were vectorized with factor
1629   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1633   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1634                                     bool &NeedToScalarize);
1635 
1636   /// Invalidates decisions already taken by the cost model.
1637   void invalidateCostModelingDecisions() {
1638     WideningDecisions.clear();
1639     Uniforms.clear();
1640     Scalars.clear();
1641   }
1642 
1643 private:
1644   unsigned NumPredStores = 0;
1645 
1646   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1647   /// than zero. One is returned if vectorization should best be avoided due
1648   /// to cost.
1649   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1650                                     ElementCount UserVF);
1651 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1659   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1660 
1661   /// Returns the expected execution cost. The unit of the cost does
1662   /// not matter because we use the 'cost' units to compare different
1663   /// vector widths. The cost that is returned is *not* normalized by
1664   /// the factor width.
1665   VectorizationCostTy expectedCost(ElementCount VF);
1666 
1667   /// Returns the execution time cost of an instruction for a given vector
1668   /// width. Vector width of one means scalar.
1669   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1670 
1671   /// The cost-computation logic from getInstructionCost which provides
1672   /// the vector type as an output parameter.
1673   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1674                                      Type *&VectorTy);
1675 
1676   /// Return the cost of instructions in an inloop reduction pattern, if I is
1677   /// part of that pattern.
1678   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1679                                           Type *VectorTy,
1680                                           TTI::TargetCostKind CostKind);
1681 
1682   /// Calculate vectorization cost of memory instruction \p I.
1683   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1684 
1685   /// The cost computation for scalarized memory instruction.
1686   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1687 
1688   /// The cost computation for interleaving group of memory instructions.
1689   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1690 
1691   /// The cost computation for Gather/Scatter instruction.
1692   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1693 
1694   /// The cost computation for widening instruction \p I with consecutive
1695   /// memory access.
1696   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1697 
  /// The cost calculation for Load/Store instruction \p I with a uniform pointer:
1699   /// Load: scalar load + broadcast.
1700   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1701   /// element)
1702   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1703 
1704   /// Estimate the overhead of scalarizing an instruction. This is a
1705   /// convenience wrapper for the type-based getScalarizationOverhead API.
1706   InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);
1707 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1710   bool isConsecutiveLoadOrStore(Instruction *I);
1711 
1712   /// Returns true if an artificially high cost for emulated masked memrefs
1713   /// should be used.
1714   bool useEmulatedMaskMemRefHack(Instruction *I);
1715 
1716   /// Map of scalar integer values to the smallest bitwidth they can be legally
1717   /// represented as. The vector equivalents of these values should be truncated
1718   /// to this type.
1719   MapVector<Instruction *, uint64_t> MinBWs;
1720 
1721   /// A type representing the costs for instructions if they were to be
1722   /// scalarized rather than vectorized. The entries are Instruction-Cost
1723   /// pairs.
1724   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1725 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1728   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1729 
1730   /// Records whether it is allowed to have the original scalar loop execute at
1731   /// least once. This may be needed as a fallback loop in case runtime
1732   /// aliasing/dependence checks fail, or to handle the tail/remainder
1733   /// iterations when the trip count is unknown or doesn't divide by the VF,
1734   /// or as a peel-loop to handle gaps in interleave-groups.
1735   /// Under optsize and when the trip count is very small we don't allow any
1736   /// iterations to execute in the scalar loop.
1737   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1738 
1739   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1740   bool FoldTailByMasking = false;
1741 
1742   /// A map holding scalar costs for different vectorization factors. The
1743   /// presence of a cost for an instruction in the mapping indicates that the
1744   /// instruction will be scalarized when vectorizing with the associated
1745   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1746   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1747 
1748   /// Holds the instructions known to be uniform after vectorization.
1749   /// The data is collected per VF.
1750   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1751 
1752   /// Holds the instructions known to be scalar after vectorization.
1753   /// The data is collected per VF.
1754   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1755 
1756   /// Holds the instructions (address computations) that are forced to be
1757   /// scalarized.
1758   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1759 
1760   /// PHINodes of the reductions that should be expanded in-loop along with
1761   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1763   ReductionChainMap InLoopReductionChains;
1764 
1765   /// A Map of inloop reduction operations and their immediate chain operand.
1766   /// FIXME: This can be removed once reductions can be costed correctly in
1767   /// vplan. This was added to allow quick lookup to the inloop operations,
1768   /// without having to loop through InLoopReductionChains.
1769   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1770 
1771   /// Returns the expected difference in cost from scalarizing the expression
1772   /// feeding a predicated instruction \p PredInst. The instructions to
1773   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1774   /// non-negative return value implies the expression will be scalarized.
1775   /// Currently, only single-use chains are considered for scalarization.
1776   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1777                               ElementCount VF);
1778 
1779   /// Collect the instructions that are uniform after vectorization. An
1780   /// instruction is uniform if we represent it with a single scalar value in
1781   /// the vectorized loop corresponding to each vector iteration. Examples of
1782   /// uniform instructions include pointer operands of consecutive or
1783   /// interleaved memory accesses. Note that although uniformity implies an
1784   /// instruction will be scalar, the reverse is not true. In general, a
1785   /// scalarized instruction will be represented by VF scalar values in the
1786   /// vectorized loop, each corresponding to an iteration of the original
1787   /// scalar loop.
1788   void collectLoopUniforms(ElementCount VF);
1789 
1790   /// Collect the instructions that are scalar after vectorization. An
1791   /// instruction is scalar if it is known to be uniform or will be scalarized
1792   /// during vectorization. Non-uniform scalarized instructions will be
1793   /// represented by VF values in the vectorized loop, each corresponding to an
1794   /// iteration of the original scalar loop.
1795   void collectLoopScalars(ElementCount VF);
1796 
1797   /// Keeps cost model vectorization decision and cost for instructions.
1798   /// Right now it is used for memory instructions only.
1799   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1800                                 std::pair<InstWidening, InstructionCost>>;
1801 
1802   DecisionList WideningDecisions;
1803 
1804   /// Returns true if \p V is expected to be vectorized and it needs to be
1805   /// extracted.
1806   bool needsExtract(Value *V, ElementCount VF) const {
1807     Instruction *I = dyn_cast<Instruction>(V);
1808     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1809         TheLoop->isLoopInvariant(I))
1810       return false;
1811 
1812     // Assume we can vectorize V (and hence we need extraction) if the
1813     // scalars are not computed yet. This can happen, because it is called
1814     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1815     // the scalars are collected. That should be a safe assumption in most
1816     // cases, because we check if the operands have vectorizable types
1817     // beforehand in LoopVectorizationLegality.
1818     return Scalars.find(VF) == Scalars.end() ||
1819            !isScalarAfterVectorization(I, VF);
1820   };
1821 
1822   /// Returns a range containing only operands needing to be extracted.
1823   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1824                                                    ElementCount VF) {
1825     return SmallVector<Value *, 4>(make_filter_range(
1826         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1827   }
1828 
1829   /// Determines if we have the infrastructure to vectorize loop \p L and its
1830   /// epilogue, assuming the main loop is vectorized by \p VF.
1831   bool isCandidateForEpilogueVectorization(const Loop &L,
1832                                            const ElementCount VF) const;
1833 
1834   /// Returns true if epilogue vectorization is considered profitable, and
1835   /// false otherwise.
1836   /// \p VF is the vectorization factor chosen for the original loop.
1837   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1838 
1839 public:
1840   /// The loop that we evaluate.
1841   Loop *TheLoop;
1842 
1843   /// Predicated scalar evolution analysis.
1844   PredicatedScalarEvolution &PSE;
1845 
1846   /// Loop Info analysis.
1847   LoopInfo *LI;
1848 
1849   /// Vectorization legality.
1850   LoopVectorizationLegality *Legal;
1851 
1852   /// Vector target information.
1853   const TargetTransformInfo &TTI;
1854 
1855   /// Target Library Info.
1856   const TargetLibraryInfo *TLI;
1857 
1858   /// Demanded bits analysis.
1859   DemandedBits *DB;
1860 
1861   /// Assumption cache.
1862   AssumptionCache *AC;
1863 
1864   /// Interface to emit optimization remarks.
1865   OptimizationRemarkEmitter *ORE;
1866 
1867   const Function *TheFunction;
1868 
1869   /// Loop Vectorize Hint.
1870   const LoopVectorizeHints *Hints;
1871 
  /// The interleaved access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1874   InterleavedAccessInfo &InterleaveInfo;
1875 
1876   /// Values to ignore in the cost model.
1877   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1878 
1879   /// Values to ignore in the cost model when VF > 1.
1880   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1881 
1882   /// Profitable vector factors.
1883   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1884 };
1885 
1886 } // end namespace llvm
1887 
1888 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1889 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
1891 // vector length information is not provided, vectorization is not considered
1892 // explicit. Interleave hints are not allowed either. These limitations will be
1893 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
1895 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1896 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1897 // provides *explicit vectorization hints* (LV can bypass legal checks and
1898 // assume that vectorization is legal). However, both hints are implemented
1899 // using the same metadata (llvm.loop.vectorize, processed by
1900 // LoopVectorizeHints). This will be fixed in the future when the native IR
1901 // representation for pragma 'omp simd' is introduced.
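// For example (illustrative), an outer loop annotated as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// or
//   #pragma omp simd simdlen(4)
// is treated as explicitly vectorizable by this path.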
1902 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1903                                    OptimizationRemarkEmitter *ORE) {
1904   assert(!OuterLp->isInnermost() && "This is not an outer loop");
1905   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1906 
1907   // Only outer loops with an explicit vectorization hint are supported.
1908   // Unannotated outer loops are ignored.
1909   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1910     return false;
1911 
1912   Function *Fn = OuterLp->getHeader()->getParent();
1913   if (!Hints.allowVectorization(Fn, OuterLp,
1914                                 true /*VectorizeOnlyWhenForced*/)) {
1915     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1916     return false;
1917   }
1918 
1919   if (Hints.getInterleave() > 1) {
1920     // TODO: Interleave support is future work.
1921     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1922                          "outer loops.\n");
1923     Hints.emitRemarkWithHints();
1924     return false;
1925   }
1926 
1927   return true;
1928 }
1929 
1930 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1931                                   OptimizationRemarkEmitter *ORE,
1932                                   SmallVectorImpl<Loop *> &V) {
1933   // Collect inner loops and outer loops without irreducible control flow. For
1934   // now, only collect outer loops that have explicit vectorization hints. If we
1935   // are stress testing the VPlan H-CFG construction, we collect the outermost
1936   // loop of every loop nest.
1937   if (L.isInnermost() || VPlanBuildStressTest ||
1938       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1939     LoopBlocksRPO RPOT(&L);
1940     RPOT.perform(LI);
1941     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1942       V.push_back(&L);
1943       // TODO: Collect inner loops inside marked outer loops in case
1944       // vectorization fails for the outer loop. Do not invoke
1945       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1946       // already known to be reducible. We can use an inherited attribute for
1947       // that.
1948       return;
1949     }
1950   }
1951   for (Loop *InnerL : L)
1952     collectSupportedLoops(*InnerL, LI, ORE, V);
1953 }
1954 
1955 namespace {
1956 
1957 /// The LoopVectorize Pass.
1958 struct LoopVectorize : public FunctionPass {
1959   /// Pass identification, replacement for typeid
1960   static char ID;
1961 
1962   LoopVectorizePass Impl;
1963 
1964   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1965                          bool VectorizeOnlyWhenForced = false)
1966       : FunctionPass(ID),
1967         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1968     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1969   }
1970 
1971   bool runOnFunction(Function &F) override {
1972     if (skipFunction(F))
1973       return false;
1974 
1975     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1976     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1977     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1978     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1979     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1980     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1981     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1982     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1983     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1984     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1985     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1986     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1987     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1988 
1989     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1990         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1991 
1992     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1993                         GetLAA, *ORE, PSI).MadeAnyChange;
1994   }
1995 
1996   void getAnalysisUsage(AnalysisUsage &AU) const override {
1997     AU.addRequired<AssumptionCacheTracker>();
1998     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1999     AU.addRequired<DominatorTreeWrapperPass>();
2000     AU.addRequired<LoopInfoWrapperPass>();
2001     AU.addRequired<ScalarEvolutionWrapperPass>();
2002     AU.addRequired<TargetTransformInfoWrapperPass>();
2003     AU.addRequired<AAResultsWrapperPass>();
2004     AU.addRequired<LoopAccessLegacyAnalysis>();
2005     AU.addRequired<DemandedBitsWrapperPass>();
2006     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2007     AU.addRequired<InjectTLIMappingsLegacy>();
2008 
2009     // We currently do not preserve loopinfo/dominator analyses with outer loop
2010     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
2012     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2013     if (!EnableVPlanNativePath) {
2014       AU.addPreserved<LoopInfoWrapperPass>();
2015       AU.addPreserved<DominatorTreeWrapperPass>();
2016     }
2017 
2018     AU.addPreserved<BasicAAWrapperPass>();
2019     AU.addPreserved<GlobalsAAWrapperPass>();
2020     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2021   }
2022 };
2023 
2024 } // end anonymous namespace
2025 
2026 //===----------------------------------------------------------------------===//
2027 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2028 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2029 //===----------------------------------------------------------------------===//
2030 
2031 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2035   Instruction *Instr = dyn_cast<Instruction>(V);
2036   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2037                      (!Instr ||
2038                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2039   // Place the code for broadcasting invariant variables in the new preheader.
2040   IRBuilder<>::InsertPointGuard Guard(Builder);
2041   if (SafeToHoist)
2042     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2043 
2044   // Broadcast the scalar into all locations in the vector.
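  // For example (illustrative), broadcasting %x with a fixed VF of 4 produces
  // the vector <%x, %x, %x, %x>.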
2045   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2046 
2047   return Shuf;
2048 }
2049 
2050 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2051     const InductionDescriptor &II, Value *Step, Value *Start,
2052     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2053     VPTransformState &State) {
2054   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2055          "Expected either an induction phi-node or a truncate of it!");
2056 
2057   // Construct the initial value of the vector IV in the vector loop preheader
2058   auto CurrIP = Builder.saveIP();
2059   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2060   if (isa<TruncInst>(EntryVal)) {
2061     assert(Start->getType()->isIntegerTy() &&
2062            "Truncation requires an integer type");
2063     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2064     Step = Builder.CreateTrunc(Step, TruncType);
2065     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2066   }
2067   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2068   Value *SteppedStart =
2069       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2070 
2071   // We create vector phi nodes for both integer and floating-point induction
2072   // variables. Here, we determine the kind of arithmetic we will perform.
2073   Instruction::BinaryOps AddOp;
2074   Instruction::BinaryOps MulOp;
2075   if (Step->getType()->isIntegerTy()) {
2076     AddOp = Instruction::Add;
2077     MulOp = Instruction::Mul;
2078   } else {
2079     AddOp = II.getInductionOpcode();
2080     MulOp = Instruction::FMul;
2081   }
2082 
2083   // Multiply the vectorization factor by the step using integer or
2084   // floating-point arithmetic as appropriate.
2085   Value *ConstVF =
2086       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
2087   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2088 
2089   // Create a vector splat to use in the induction update.
2090   //
2091   // FIXME: If the step is non-constant, we create the vector splat with
2092   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2093   //        handle a constant vector splat.
2094   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2095   Value *SplatVF = isa<Constant>(Mul)
2096                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2097                        : Builder.CreateVectorSplat(VF, Mul);
2098   Builder.restoreIP(CurrIP);
2099 
2100   // We may need to add the step a number of times, depending on the unroll
2101   // factor. The last of those goes into the PHI.
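  // For example (illustrative), with UF = 2, a fixed VF of 4 and step 1, part 0
  // uses the phi value <i, i+1, i+2, i+3> and part 1 uses the phi value plus
  // SplatVF = <4, 4, 4, 4>, i.e. <i+4, i+5, i+6, i+7>.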
2102   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2103                                     &*LoopVectorBody->getFirstInsertionPt());
2104   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2105   Instruction *LastInduction = VecInd;
2106   for (unsigned Part = 0; Part < UF; ++Part) {
2107     State.set(Def, EntryVal, LastInduction, Part);
2108 
2109     if (isa<TruncInst>(EntryVal))
2110       addMetadata(LastInduction, EntryVal);
2111     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2112                                           State, Part);
2113 
2114     LastInduction = cast<Instruction>(addFastMathFlag(
2115         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2116     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2117   }
2118 
2119   // Move the last step to the end of the latch block. This ensures consistent
2120   // placement of all induction updates.
2121   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2122   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2123   auto *ICmp = cast<Instruction>(Br->getCondition());
2124   LastInduction->moveBefore(ICmp);
2125   LastInduction->setName("vec.ind.next");
2126 
2127   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2128   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2129 }
2130 
2131 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2132   return Cost->isScalarAfterVectorization(I, VF) ||
2133          Cost->isProfitableToScalarize(I, VF);
2134 }
2135 
2136 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2137   if (shouldScalarizeInstruction(IV))
2138     return true;
2139   auto isScalarInst = [&](User *U) -> bool {
2140     auto *I = cast<Instruction>(U);
2141     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2142   };
2143   return llvm::any_of(IV->users(), isScalarInst);
2144 }
2145 
2146 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2147     const InductionDescriptor &ID, const Instruction *EntryVal,
2148     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2149     unsigned Part, unsigned Lane) {
2150   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2151          "Expected either an induction phi-node or a truncate of it!");
2152 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // reuses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
2159   if (isa<TruncInst>(EntryVal))
2160     return;
2161 
2162   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2163   if (Casts.empty())
2164     return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
2168   if (Lane < UINT_MAX)
2169     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2170   else
2171     State.set(CastDef, VectorLoopVal, Part);
2172 }
2173 
2174 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2175                                                 TruncInst *Trunc, VPValue *Def,
2176                                                 VPValue *CastDef,
2177                                                 VPTransformState &State) {
2178   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2179          "Primary induction variable must have an integer type");
2180 
2181   auto II = Legal->getInductionVars().find(IV);
2182   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2183 
2184   auto ID = II->second;
2185   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2186 
2187   // The value from the original loop to which we are mapping the new induction
2188   // variable.
2189   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2190 
2191   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2192 
2193   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2195   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2196     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2197            "Induction step should be loop invariant");
2198     if (PSE.getSE()->isSCEVable(IV->getType())) {
2199       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2200       return Exp.expandCodeFor(Step, Step->getType(),
2201                                LoopVectorPreHeader->getTerminator());
2202     }
2203     return cast<SCEVUnknown>(Step)->getValue();
2204   };
2205 
2206   // The scalar value to broadcast. This is derived from the canonical
2207   // induction variable. If a truncation type is given, truncate the canonical
2208   // induction variable and step. Otherwise, derive these values from the
2209   // induction descriptor.
2210   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2211     Value *ScalarIV = Induction;
2212     if (IV != OldInduction) {
2213       ScalarIV = IV->getType()->isIntegerTy()
2214                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2215                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2216                                           IV->getType());
2217       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2218       ScalarIV->setName("offset.idx");
2219     }
2220     if (Trunc) {
2221       auto *TruncType = cast<IntegerType>(Trunc->getType());
2222       assert(Step->getType()->isIntegerTy() &&
2223              "Truncation requires an integer step");
2224       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2225       Step = Builder.CreateTrunc(Step, TruncType);
2226     }
2227     return ScalarIV;
2228   };
2229 
2230   // Create the vector values from the scalar IV, in the absence of creating a
2231   // vector IV.
2232   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2233     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2234     for (unsigned Part = 0; Part < UF; ++Part) {
2235       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2236       Value *EntryPart =
2237           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2238                         ID.getInductionOpcode());
2239       State.set(Def, EntryVal, EntryPart, Part);
2240       if (Trunc)
2241         addMetadata(EntryPart, Trunc);
2242       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2243                                             State, Part);
2244     }
2245   };
2246 
2247   // Now do the actual transformations, and start with creating the step value.
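  // Illustrative summary of the cases handled below: for a scalar VF we splat
  // the scalar IV; if no user needs a scalar IV we create only a vector phi;
  // if the IV itself is widened we create a vector phi and also scalar steps
  // for its scalarized users; otherwise we emit only scalar steps, plus a
  // splat IV when tail-folding requires the predicate.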
2248   Value *Step = CreateStepValue(ID.getStep());
2249   if (VF.isZero() || VF.isScalar()) {
2250     Value *ScalarIV = CreateScalarIV(Step);
2251     CreateSplatIV(ScalarIV, Step);
2252     return;
2253   }
2254 
2255   // Determine if we want a scalar version of the induction variable. This is
2256   // true if the induction variable itself is not widened, or if it has at
2257   // least one user in the loop that is not widened.
2258   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2259   if (!NeedsScalarIV) {
2260     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2261                                     State);
2262     return;
2263   }
2264 
2265   // Try to create a new independent vector induction variable. If we can't
2266   // create the phi node, we will splat the scalar induction variable in each
2267   // loop iteration.
2268   if (!shouldScalarizeInstruction(EntryVal)) {
2269     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2270                                     State);
2271     Value *ScalarIV = CreateScalarIV(Step);
2272     // Create scalar steps that can be used by instructions we will later
2273     // scalarize. Note that the addition of the scalar steps will not increase
2274     // the number of instructions in the loop in the common case prior to
2275     // InstCombine. We will be trading one vector extract for each scalar step.
2276     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2277     return;
2278   }
2279 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
2283   Value *ScalarIV = CreateScalarIV(Step);
2284   if (!Cost->isScalarEpilogueAllowed())
2285     CreateSplatIV(ScalarIV, Step);
2286   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2287 }
2288 
2289 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2290                                           Instruction::BinaryOps BinOp) {
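  // This returns Val + <StartIdx, StartIdx+1, ...> * Step. For example
  // (illustrative), with StartIdx = 4 and Step = 1 on a 4-element integer
  // vector, the result is Val + <4, 5, 6, 7>.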
2291   // Create and check the types.
2292   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2293   int VLen = ValVTy->getNumElements();
2294 
2295   Type *STy = Val->getType()->getScalarType();
2296   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2297          "Induction Step must be an integer or FP");
2298   assert(Step->getType() == STy && "Step has wrong type");
2299 
2300   SmallVector<Constant *, 8> Indices;
2301 
2302   if (STy->isIntegerTy()) {
2303     // Create a vector of consecutive numbers from zero to VF.
2304     for (int i = 0; i < VLen; ++i)
2305       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2306 
2307     // Add the consecutive indices to the vector value.
2308     Constant *Cv = ConstantVector::get(Indices);
2309     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2310     Step = Builder.CreateVectorSplat(VLen, Step);
2311     assert(Step->getType() == Val->getType() && "Invalid step vec");
2312     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2313     // which can be found from the original scalar operations.
2314     Step = Builder.CreateMul(Cv, Step);
2315     return Builder.CreateAdd(Val, Step, "induction");
2316   }
2317 
2318   // Floating point induction.
2319   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2320          "Binary Opcode should be specified for FP induction");
2321   // Create a vector of consecutive numbers from zero to VF.
2322   for (int i = 0; i < VLen; ++i)
2323     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2324 
2325   // Add the consecutive indices to the vector value.
2326   Constant *Cv = ConstantVector::get(Indices);
2327 
2328   Step = Builder.CreateVectorSplat(VLen, Step);
2329 
2330   // Floating point operations had to be 'fast' to enable the induction.
2331   FastMathFlags Flags;
2332   Flags.setFast();
2333 
2334   Value *MulOp = Builder.CreateFMul(Cv, Step);
2335   if (isa<Instruction>(MulOp))
2336     // Have to check, MulOp may be a constant
2337     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2338 
2339   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2340   if (isa<Instruction>(BOp))
2341     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2342   return BOp;
2343 }
2344 
2345 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2346                                            Instruction *EntryVal,
2347                                            const InductionDescriptor &ID,
2348                                            VPValue *Def, VPValue *CastDef,
2349                                            VPTransformState &State) {
2350   // We shouldn't have to build scalar steps if we aren't vectorizing.
2351   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2353   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2354   assert(ScalarIVTy == Step->getType() &&
2355          "Val and Step should have the same type");
2356 
2357   // We build scalar steps for both integer and floating-point induction
2358   // variables. Here, we determine the kind of arithmetic we will perform.
2359   Instruction::BinaryOps AddOp;
2360   Instruction::BinaryOps MulOp;
2361   if (ScalarIVTy->isIntegerTy()) {
2362     AddOp = Instruction::Add;
2363     MulOp = Instruction::Mul;
2364   } else {
2365     AddOp = ID.getInductionOpcode();
2366     MulOp = Instruction::FMul;
2367   }
2368 
2369   // Determine the number of scalars we need to generate for each unroll
2370   // iteration. If EntryVal is uniform, we only need to generate the first
2371   // lane. Otherwise, we generate all VF values.
2372   unsigned Lanes =
2373       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2374           ? 1
2375           : VF.getKnownMinValue();
2376   assert((!VF.isScalable() || Lanes == 1) &&
2377          "Should never scalarize a scalable vector");
2378   // Compute the scalar steps and save the results in VectorLoopValueMap.
2379   for (unsigned Part = 0; Part < UF; ++Part) {
2380     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2381       auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2382                                          ScalarIVTy->getScalarSizeInBits());
2383       Value *StartIdx =
2384           createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2385       if (ScalarIVTy->isFloatingPointTy())
2386         StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
2387       StartIdx = addFastMathFlag(Builder.CreateBinOp(
2388           AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
2389       // The step returned by `createStepForVF` is a runtime-evaluated value
2390       // when VF is scalable. Otherwise, it should be folded into a Constant.
2391       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2392              "Expected StartIdx to be folded to a constant when VF is not "
2393              "scalable");
2394       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2395       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2396       State.set(Def, Add, VPIteration(Part, Lane));
2397       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2398                                             Part, Lane);
2399     }
2400   }
2401 }
2402 
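// Return the vector form of V for unroll part Part. If a vector value is
// already recorded it is reused; if V has only been scalarized, the vector is
// built on demand (a broadcast of lane 0 for uniform values, otherwise an
// insertelement sequence); any other value is assumed loop-invariant and is
// broadcast.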
2403 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2404   assert(V != Induction && "The new induction variable should not be used.");
2405   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2406   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2407 
2408   // If we have a stride that is replaced by one, do it here. Defer this for
2409   // the VPlan-native path until we start running Legal checks in that path.
2410   if (!EnableVPlanNativePath && Legal->hasStride(V))
2411     V = ConstantInt::get(V->getType(), 1);
2412 
2413   // If we have a vector mapped to this value, return it.
2414   if (VectorLoopValueMap.hasVectorValue(V, Part))
2415     return VectorLoopValueMap.getVectorValue(V, Part);
2416 
2417   // If the value has not been vectorized, check if it has been scalarized
2418   // instead. If it has been scalarized, and we actually need the value in
2419   // vector form, we will construct the vector values on demand.
2420   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2421     Value *ScalarValue =
2422         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, 0));
2423 
2424     // If we've scalarized a value, that value should be an instruction.
2425     auto *I = cast<Instruction>(V);
2426 
2427     // If we aren't vectorizing, we can just copy the scalar map values over to
2428     // the vector map.
2429     if (VF.isScalar()) {
2430       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2431       return ScalarValue;
2432     }
2433 
2434     // Get the last scalar instruction we generated for V and Part. If the value
2435     // is known to be uniform after vectorization, this corresponds to lane zero
2436     // of the Part unroll iteration. Otherwise, the last instruction is the one
2437     // we created for the last vector lane of the Part unroll iteration.
2438     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF)
2439                             ? 0
2440                             : VF.getKnownMinValue() - 1;
2441     assert((!VF.isScalable() || LastLane == 0) &&
2442            "Scalable vectorization can't lead to any scalarized values.");
2443     auto *LastInst = cast<Instruction>(
2444         VectorLoopValueMap.getScalarValue(V, VPIteration(Part, LastLane)));
2445 
2446     // Set the insert point after the last scalarized instruction. This ensures
2447     // the insertelement sequence will directly follow the scalar definitions.
2448     auto OldIP = Builder.saveIP();
2449     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2450     Builder.SetInsertPoint(&*NewIP);
2451 
2452     // However, if we are vectorizing, we need to construct the vector values.
2453     // If the value is known to be uniform after vectorization, we can just
2454     // broadcast the scalar value corresponding to lane zero for each unroll
2455     // iteration. Otherwise, we construct the vector values using insertelement
2456     // instructions. Since the resulting vectors are stored in
2457     // VectorLoopValueMap, we will only generate the insertelements once.
2458     Value *VectorValue = nullptr;
2459     if (Cost->isUniformAfterVectorization(I, VF)) {
2460       VectorValue = getBroadcastInstrs(ScalarValue);
2461       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2462     } else {
2463       // Initialize packing with insertelements to start from poison.
2464       assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2465       Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF));
2466       VectorLoopValueMap.setVectorValue(V, Part, Poison);
2467       for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
2468         packScalarIntoVectorValue(V, VPIteration(Part, Lane));
2469       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2470     }
2471     Builder.restoreIP(OldIP);
2472     return VectorValue;
2473   }
2474 
2475   // If this scalar is unknown, assume that it is a constant or that it is
2476   // loop invariant. Broadcast V and save the value for future uses.
2477   Value *B = getBroadcastInstrs(V);
2478   VectorLoopValueMap.setVectorValue(V, Part, B);
2479   return B;
2480 }
2481 
2482 Value *
2483 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2484                                             const VPIteration &Instance) {
2485   // If the value is not an instruction contained in the loop, it should
2486   // already be scalar.
2487   if (OrigLoop->isLoopInvariant(V))
2488     return V;
2489 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2493 
2494   // If the value from the original loop has not been vectorized, it is
2495   // represented by UF x VF scalar values in the new loop. Return the requested
2496   // scalar value.
2497   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2498     return VectorLoopValueMap.getScalarValue(V, Instance);
2499 
2500   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2501   // for the given unroll part. If this entry is not a vector type (i.e., the
2502   // vectorization factor is one), there is no need to generate an
2503   // extractelement instruction.
2504   auto *U = getOrCreateVectorValue(V, Instance.Part);
2505   if (!U->getType()->isVectorTy()) {
2506     assert(VF.isScalar() && "Value not scalarized has non-vector type");
2507     return U;
2508   }
2509 
2510   // Otherwise, the value from the original loop has been vectorized and is
2511   // represented by UF vector values. Extract and return the requested scalar
2512   // value from the appropriate vector lane.
2513   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2514 }
2515 
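// Insert the scalar value generated for (Part, Lane) into lane Lane of the
// vector value recorded for unroll part Part.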
2516 void InnerLoopVectorizer::packScalarIntoVectorValue(
2517     Value *V, const VPIteration &Instance) {
2518   assert(V != Induction && "The new induction variable should not be used.");
2519   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2520   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2521 
2522   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2523   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2524   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2525                                             Builder.getInt32(Instance.Lane));
2526   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2527 }
2528 
2529 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2530                                                     const VPIteration &Instance,
2531                                                     VPTransformState &State) {
2532   Value *ScalarInst = State.get(Def, Instance);
2533   Value *VectorValue = State.get(Def, Instance.Part);
2534   VectorValue = Builder.CreateInsertElement(
2535       VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
2536   State.set(Def, VectorValue, Instance.Part);
2537 }
2538 
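// Reverse the lanes of Vec with a shuffle, e.g. <a, b, c, d> becomes
// <d, c, b, a> for VF = 4.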
2539 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2540   assert(Vec->getType()->isVectorTy() && "Invalid type");
2541   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
2542   SmallVector<int, 8> ShuffleMask;
2543   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2544     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2545 
2546   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2547 }
2548 
2549 // Return whether we allow using masked interleave-groups (for dealing with
2550 // strided loads/stores that reside in predicated blocks, or for dealing
2551 // with gaps).
2552 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2553   // If an override option has been passed in for interleaved accesses, use it.
2554   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2555     return EnableMaskedInterleavedMemAccesses;
2556 
2557   return TTI.enableMaskedInterleavedAccessVectorization();
2558 }
2559 
2560 // Try to vectorize the interleave group that \p Instr belongs to.
2561 //
// E.g. Translate the following interleaved load group (factor = 3):
2563 //   for (i = 0; i < N; i+=3) {
2564 //     R = Pic[i];             // Member of index 0
2565 //     G = Pic[i+1];           // Member of index 1
2566 //     B = Pic[i+2];           // Member of index 2
2567 //     ... // do something to R, G, B
2568 //   }
2569 // To:
2570 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2571 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2572 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2573 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2574 //
// Or translate the following interleaved store group (factor = 3):
2576 //   for (i = 0; i < N; i+=3) {
2577 //     ... do something to R, G, B
2578 //     Pic[i]   = R;           // Member of index 0
2579 //     Pic[i+1] = G;           // Member of index 1
2580 //     Pic[i+2] = B;           // Member of index 2
2581 //   }
2582 // To:
2583 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2584 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2585 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2586 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2587 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2588 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2589     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2590     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2591     VPValue *BlockInMask) {
2592   Instruction *Instr = Group->getInsertPos();
2593   const DataLayout &DL = Instr->getModule()->getDataLayout();
2594 
2595   // Prepare for the vector type of the interleaved load/store.
2596   Type *ScalarTy = getMemInstValueType(Instr);
2597   unsigned InterleaveFactor = Group->getFactor();
2598   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2599   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2600 
2601   // Prepare for the new pointers.
2602   SmallVector<Value *, 2> AddrParts;
2603   unsigned Index = Group->getIndex(Instr);
2604 
2605   // TODO: extend the masked interleaved-group support to reversed access.
2606   assert((!BlockInMask || !Group->isReverse()) &&
2607          "Reversed masked interleave-group not supported.");
2608 
2609   // If the group is reverse, adjust the index to refer to the last vector lane
2610   // instead of the first. We adjust the index from the first vector lane,
2611   // rather than directly getting the pointer for lane VF - 1, because the
2612   // pointer operand of the interleaved access is supposed to be uniform. For
2613   // uniform instructions, we're only required to generate a value for the
2614   // first vector lane in each unroll iteration.
2615   assert(!VF.isScalable() &&
2616          "scalable vector reverse operation is not implemented");
2617   if (Group->isReverse())
2618     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2619 
2620   for (unsigned Part = 0; Part < UF; Part++) {
2621     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2622     setDebugLocFromInst(Builder, AddrPart);
2623 
    // Note that the current instruction could be at any member index. We need
    // to adjust the address to that of the member with index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2635 
2636     bool InBounds = false;
2637     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2638       InBounds = gep->isInBounds();
2639     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2640     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2641 
2642     // Cast to the vector pointer type.
2643     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2644     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2645     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2646   }
2647 
2648   setDebugLocFromInst(Builder, Instr);
2649   Value *PoisonVec = PoisonValue::get(VecTy);
2650 
2651   Value *MaskForGaps = nullptr;
2652   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2653     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2654     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2655     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2656   }
2657 
2658   // Vectorize the interleaved load group.
2659   if (isa<LoadInst>(Instr)) {
2660     // For each unroll part, create a wide load for the group.
2661     SmallVector<Value *, 2> NewLoads;
2662     for (unsigned Part = 0; Part < UF; Part++) {
2663       Instruction *NewLoad;
2664       if (BlockInMask || MaskForGaps) {
2665         assert(useMaskedInterleavedAccesses(*TTI) &&
2666                "masked interleaved groups are not allowed.");
2667         Value *GroupMask = MaskForGaps;
2668         if (BlockInMask) {
2669           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2670           assert(!VF.isScalable() && "scalable vectors not yet supported.");
2671           Value *ShuffledMask = Builder.CreateShuffleVector(
2672               BlockInMaskPart,
2673               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2674               "interleaved.mask");
2675           GroupMask = MaskForGaps
2676                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2677                                                 MaskForGaps)
2678                           : ShuffledMask;
2679         }
2680         NewLoad =
2681             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2682                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
2685         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2686                                             Group->getAlign(), "wide.vec");
2687       Group->addMetadata(NewLoad);
2688       NewLoads.push_back(NewLoad);
2689     }
2690 
2691     // For each member in the group, shuffle out the appropriate data from the
2692     // wide loads.
2693     unsigned J = 0;
2694     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2695       Instruction *Member = Group->getMember(I);
2696 
2697       // Skip the gaps in the group.
2698       if (!Member)
2699         continue;
2700 
2701       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2702       auto StrideMask =
2703           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2704       for (unsigned Part = 0; Part < UF; Part++) {
2705         Value *StridedVec = Builder.CreateShuffleVector(
2706             NewLoads[Part], StrideMask, "strided.vec");
2707 
        // If this member has a different type, cast the result to that type.
2709         if (Member->getType() != ScalarTy) {
2710           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2711           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2712           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2713         }
2714 
2715         if (Group->isReverse())
2716           StridedVec = reverseVector(StridedVec);
2717 
2718         State.set(VPDefs[J], Member, StridedVec, Part);
2719       }
2720       ++J;
2721     }
2722     return;
2723   }
2724 
  // The sub-vector type for the current instruction.
2726   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2727   auto *SubVT = VectorType::get(ScalarTy, VF);
2728 
2729   // Vectorize the interleaved store group.
2730   for (unsigned Part = 0; Part < UF; Part++) {
2731     // Collect the stored vector from each member.
2732     SmallVector<Value *, 4> StoredVecs;
2733     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2736 
2737       Value *StoredVec = State.get(StoredValues[i], Part);
2738 
2739       if (Group->isReverse())
2740         StoredVec = reverseVector(StoredVec);
2741 
      // If this member has a different type, cast it to the unified type.
2744       if (StoredVec->getType() != SubVT)
2745         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2746 
2747       StoredVecs.push_back(StoredVec);
2748     }
2749 
2750     // Concatenate all vectors into a wide vector.
2751     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2752 
2753     // Interleave the elements in the wide vector.
2754     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2755     Value *IVec = Builder.CreateShuffleVector(
2756         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2757         "interleaved.vec");
2758 
2759     Instruction *NewStoreInstr;
2760     if (BlockInMask) {
2761       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2762       Value *ShuffledMask = Builder.CreateShuffleVector(
2763           BlockInMaskPart,
2764           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2765           "interleaved.mask");
2766       NewStoreInstr = Builder.CreateMaskedStore(
2767           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
2770       NewStoreInstr =
2771           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2772 
2773     Group->addMetadata(NewStoreInstr);
2774   }
2775 }
2776 
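// Widen a single memory instruction according to the cost-model decision:
// either a consecutive wide load/store (possibly reversed), or a
// gather/scatter, applying the block-in mask when the access is predicated.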
2777 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2778     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2779     VPValue *StoredValue, VPValue *BlockInMask) {
2780   // Attempt to issue a wide load.
2781   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2782   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2783 
2784   assert((LI || SI) && "Invalid Load/Store instruction");
2785   assert((!SI || StoredValue) && "No stored value provided for widened store");
2786   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2787 
2788   LoopVectorizationCostModel::InstWidening Decision =
2789       Cost->getWideningDecision(Instr, VF);
2790   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2791           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2792           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2793          "CM decision is not to widen the memory instruction");
2794 
2795   Type *ScalarDataTy = getMemInstValueType(Instr);
2796 
2797   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2798   const Align Alignment = getLoadStoreAlignment(Instr);
2799 
2800   // Determine if the pointer operand of the access is either consecutive or
2801   // reverse consecutive.
2802   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2803   bool ConsecutiveStride =
2804       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2805   bool CreateGatherScatter =
2806       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2807 
2808   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2809   // gather/scatter. Otherwise Decision should have been to Scalarize.
2810   assert((ConsecutiveStride || CreateGatherScatter) &&
2811          "The instruction should be scalarized");
2812   (void)ConsecutiveStride;
2813 
2814   VectorParts BlockInMaskParts(UF);
2815   bool isMaskRequired = BlockInMask;
2816   if (isMaskRequired)
2817     for (unsigned Part = 0; Part < UF; ++Part)
2818       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2819 
2820   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2821     // Calculate the pointer for the specific unroll-part.
2822     GetElementPtrInst *PartPtr = nullptr;
2823 
2824     bool InBounds = false;
2825     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2826       InBounds = gep->isInBounds();
2827 
2828     if (Reverse) {
2829       assert(!VF.isScalable() &&
2830              "Reversing vectors is not yet supported for scalable vectors.");
2831 
2832       // If the address is consecutive but reversed, then the
2833       // wide store needs to start at the last vector element.
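      // For example, with VF = 4 and Part = 1, the two GEPs below compute
      // Ptr - 4 and then (Ptr - 4) + (1 - 4) = Ptr - 7, so the wide access
      // covers Ptr[-7 .. -4] and its data (and mask) is reversed.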
2834       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2835           ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
2836       PartPtr->setIsInBounds(InBounds);
2837       PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
2838           ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
2839       PartPtr->setIsInBounds(InBounds);
2840       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2841         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2842     } else {
2843       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2844       PartPtr = cast<GetElementPtrInst>(
2845           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2846       PartPtr->setIsInBounds(InBounds);
2847     }
2848 
2849     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2850     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2851   };
2852 
2853   // Handle Stores:
2854   if (SI) {
2855     setDebugLocFromInst(Builder, SI);
2856 
2857     for (unsigned Part = 0; Part < UF; ++Part) {
2858       Instruction *NewSI = nullptr;
2859       Value *StoredVal = State.get(StoredValue, Part);
2860       if (CreateGatherScatter) {
2861         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2862         Value *VectorGep = State.get(Addr, Part);
2863         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2864                                             MaskPart);
2865       } else {
2866         if (Reverse) {
2867           // If we store to reverse consecutive memory locations, then we need
2868           // to reverse the order of elements in the stored value.
2869           StoredVal = reverseVector(StoredVal);
2870           // We don't want to update the value in the map as it might be used in
2871           // another expression. So don't call resetVectorValue(StoredVal).
2872         }
2873         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2874         if (isMaskRequired)
2875           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2876                                             BlockInMaskParts[Part]);
2877         else
2878           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2879       }
2880       addMetadata(NewSI, SI);
2881     }
2882     return;
2883   }
2884 
2885   // Handle loads.
2886   assert(LI && "Must have a load instruction");
2887   setDebugLocFromInst(Builder, LI);
2888   for (unsigned Part = 0; Part < UF; ++Part) {
2889     Value *NewLI;
2890     if (CreateGatherScatter) {
2891       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2892       Value *VectorGep = State.get(Addr, Part);
2893       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2894                                          nullptr, "wide.masked.gather");
2895       addMetadata(NewLI, LI);
2896     } else {
2897       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2898       if (isMaskRequired)
2899         NewLI = Builder.CreateMaskedLoad(
2900             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2901             "wide.masked.load");
2902       else
2903         NewLI =
2904             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2905 
      // Add metadata to the load, but record the (possibly reversed) shuffle
      // as the widened value.
2907       addMetadata(NewLI, LI);
2908       if (Reverse)
2909         NewLI = reverseVector(NewLI);
2910     }
2911 
2912     State.set(Def, Instr, NewLI, Part);
2913   }
2914 }
2915 
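// Clone Instr for a single (Part, Lane), replacing its operands with the
// scalar values already generated for that lane (or lane 0 for uniform or
// loop-invariant operands), and record the clone in State.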
2916 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
2917                                                VPUser &User,
2918                                                const VPIteration &Instance,
2919                                                bool IfPredicateInstr,
2920                                                VPTransformState &State) {
2921   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2922 
2923   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2924   // the first lane and part.
2925   if (isa<NoAliasScopeDeclInst>(Instr))
2926     if (!Instance.isFirstIteration())
2927       return;
2928 
2929   setDebugLocFromInst(Builder, Instr);
2930 
  // Does this instruction return a value?
2932   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2933 
2934   Instruction *Cloned = Instr->clone();
2935   if (!IsVoidRetTy)
2936     Cloned->setName(Instr->getName() + ".cloned");
2937 
2938   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2939                                Builder.GetInsertPoint());
2940   // Replace the operands of the cloned instructions with their scalar
2941   // equivalents in the new loop.
2942   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2943     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
2944     auto InputInstance = Instance;
2945     if (!Operand || !OrigLoop->contains(Operand) ||
2946         (Cost->isUniformAfterVectorization(Operand, State.VF)))
2947       InputInstance.Lane = 0;
2948     auto *NewOp = State.get(User.getOperand(op), InputInstance);
2949     Cloned->setOperand(op, NewOp);
2950   }
2951   addNewMetadata(Cloned, Instr);
2952 
2953   // Place the cloned scalar in the new loop.
2954   Builder.Insert(Cloned);
2955 
2956   State.set(Def, Instr, Cloned, Instance);
2957 
  // If we just cloned a new assumption, add it to the assumption cache.
2959   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2960     if (II->getIntrinsicID() == Intrinsic::assume)
2961       AC->registerAssumption(II);
2962 
2963   // End if-block.
2964   if (IfPredicateInstr)
2965     PredicatedInstructions.push_back(Cloned);
2966 }
2967 
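// Create the primary induction variable of the new loop: a phi named "index"
// in the header that starts at Start, is incremented by Step in the latch, and
// whose incremented value is compared for equality against End to decide
// whether to exit.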
2968 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2969                                                       Value *End, Value *Step,
2970                                                       Instruction *DL) {
2971   BasicBlock *Header = L->getHeader();
2972   BasicBlock *Latch = L->getLoopLatch();
2973   // As we're just creating this loop, it's possible no latch exists
2974   // yet. If so, use the header as this will be a single block loop.
2975   if (!Latch)
2976     Latch = Header;
2977 
2978   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2979   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2980   setDebugLocFromInst(Builder, OldInst);
2981   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2982 
2983   Builder.SetInsertPoint(Latch->getTerminator());
2984   setDebugLocFromInst(Builder, OldInst);
2985 
2986   // Create i+1 and fill the PHINode.
2987   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2988   Induction->addIncoming(Start, L->getLoopPreheader());
2989   Induction->addIncoming(Next, Latch);
2990   // Create the compare.
2991   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2992   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
2993 
2994   // Now we have two terminators. Remove the old one from the block.
2995   Latch->getTerminator()->eraseFromParent();
2996 
2997   return Induction;
2998 }
2999 
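// Compute (and cache) the trip count of the original loop by expanding
// BackedgeTakenCount + 1 in the preheader, after normalizing the count to the
// widest induction type.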
3000 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3001   if (TripCount)
3002     return TripCount;
3003 
3004   assert(L && "Create Trip Count for null loop.");
3005   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3006   // Find the loop boundaries.
3007   ScalarEvolution *SE = PSE.getSE();
3008   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3009   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3010          "Invalid loop count");
3011 
3012   Type *IdxTy = Legal->getWidestInductionType();
3013   assert(IdxTy && "No type for induction");
3014 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is if
  // the induction variable was signed, and as such it will not overflow, so
  // the truncation is legal.
3020   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3021       IdxTy->getPrimitiveSizeInBits())
3022     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3023   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3024 
3025   // Get the total trip count from the count by adding 1.
3026   const SCEV *ExitCount = SE->getAddExpr(
3027       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3028 
3029   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3030 
3031   // Expand the trip count and place the new instructions in the preheader.
3032   // Notice that the pre-header does not change, only the loop body.
3033   SCEVExpander Exp(*SE, DL, "induction");
3034 
3035   // Count holds the overall loop count (N).
3036   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3037                                 L->getLoopPreheader()->getTerminator());
3038 
3039   if (TripCount->getType()->isPointerTy())
3040     TripCount =
3041         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3042                                     L->getLoopPreheader()->getTerminator());
3043 
3044   return TripCount;
3045 }
3046 
3047 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3048   if (VectorTripCount)
3049     return VectorTripCount;
3050 
3051   Value *TC = getOrCreateTripCount(L);
3052   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3053 
3054   Type *Ty = TC->getType();
3055   // This is where we can make the step a runtime constant.
3056   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3057 
3058   // If the tail is to be folded by masking, round the number of iterations N
3059   // up to a multiple of Step instead of rounding down. This is done by first
3060   // adding Step-1 and then rounding down. Note that it's ok if this addition
3061   // overflows: the vector induction variable will eventually wrap to zero given
3062   // that it starts at zero and its Step is a power of two; the loop will then
3063   // exit, with the last early-exit vector comparison also producing all-true.
3064   if (Cost->foldTailByMasking()) {
3065     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3066            "VF*UF must be a power of 2 when folding tail by masking");
3067     assert(!VF.isScalable() &&
3068            "Tail folding not yet supported for scalable vectors");
3069     TC = Builder.CreateAdd(
3070         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3071   }
3072 
3073   // Now we need to generate the expression for the part of the loop that the
3074   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3075   // iterations are not required for correctness, or N - Step, otherwise. Step
3076   // is equal to the vectorization factor (number of SIMD elements) times the
3077   // unroll factor (number of SIMD instructions).
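  // For example, with a trip count of 21, VF = 4 and UF = 2 (Step = 8),
  // N % Step is 5 and the vector trip count is 16, leaving 5 iterations for
  // the scalar remainder loop.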
3078   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3079 
3080   // There are two cases where we need to ensure (at least) the last iteration
3081   // runs in the scalar remainder loop. Thus, if the step evenly divides
3082   // the trip count, we set the remainder to be equal to the step. If the step
3083   // does not evenly divide the trip count, no adjustment is necessary since
3084   // there will already be scalar iterations. Note that the minimum iterations
3085   // check ensures that N >= Step. The cases are:
3086   // 1) If there is a non-reversed interleaved group that may speculatively
3087   //    access memory out-of-bounds.
3088   // 2) If any instruction may follow a conditionally taken exit. That is, if
3089   //    the loop contains multiple exiting blocks, or a single exiting block
3090   //    which is not the latch.
3091   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3092     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3093     R = Builder.CreateSelect(IsZero, Step, R);
3094   }
3095 
3096   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3097 
3098   return VectorTripCount;
3099 }
3100 
3101 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3102                                                    const DataLayout &DL) {
3103   // Verify that V is a vector type with same number of elements as DstVTy.
3104   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3105   unsigned VF = DstFVTy->getNumElements();
3106   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3108   Type *SrcElemTy = SrcVecTy->getElementType();
3109   Type *DstElemTy = DstFVTy->getElementType();
3110   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3111          "Vector elements must have same size");
3112 
3113   // Do a direct cast if element types are castable.
3114   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3115     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3116   }
  // V cannot be directly cast to the desired vector type. This may happen when
  // V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast using an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
3121   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3122          "Only one type should be a pointer type");
3123   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3124          "Only one type should be a floating point type");
3125   Type *IntTy =
3126       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3127   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3128   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3129   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3130 }
3131 
3132 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3133                                                          BasicBlock *Bypass) {
3134   Value *Count = getOrCreateTripCount(L);
3135   // Reuse existing vector loop preheader for TC checks.
3136   // Note that new preheader block is generated for vector loop.
3137   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3138   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3139 
3140   // Generate code to check if the loop's trip count is less than VF * UF, or
3141   // equal to it in case a scalar epilogue is required; this implies that the
3142   // vector trip count is zero. This check also covers the case where adding one
3143   // to the backedge-taken count overflowed leading to an incorrect trip count
3144   // of zero. In this case we will also jump to the scalar loop.
3145   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3146                                           : ICmpInst::ICMP_ULT;
3147 
3148   // If tail is to be folded, vector loop takes care of all iterations.
3149   Value *CheckMinIters = Builder.getFalse();
3150   if (!Cost->foldTailByMasking()) {
3151     Value *Step =
3152         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3153     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3154   }
3155   // Create new preheader for vector loop.
3156   LoopVectorPreHeader =
3157       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3158                  "vector.ph");
3159 
3160   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3161                                DT->getNode(Bypass)->getIDom()) &&
3162          "TC check is expected to dominate Bypass");
3163 
3164   // Update dominator for Bypass & LoopExit.
3165   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3166   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3167 
3168   ReplaceInstWithInst(
3169       TCCheckBlock->getTerminator(),
3170       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3171   LoopBypassBlocks.push_back(TCCheckBlock);
3172 }
3173 
3174 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3175   // Reuse existing vector loop preheader for SCEV checks.
3176   // Note that new preheader block is generated for vector loop.
3177   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
3178 
  // Generate the code to check the SCEV assumptions that we made.
3180   // We want the new basic block to start at the first instruction in a
3181   // sequence of instructions that form a check.
3182   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3183                    "scev.check");
3184   Value *SCEVCheck = Exp.expandCodeForPredicate(
3185       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
3186 
3187   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3188     if (C->isZero())
3189       return;
3190 
3191   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3192            (OptForSizeBasedOnProfile &&
3193             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3194          "Cannot SCEV check stride or overflow when optimizing for size");
3195 
3196   SCEVCheckBlock->setName("vector.scevcheck");
3197   // Create new preheader for vector loop.
3198   LoopVectorPreHeader =
3199       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
3200                  nullptr, "vector.ph");
3201 
  // Update dominator only if this is the first RT check.
3203   if (LoopBypassBlocks.empty()) {
3204     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3205     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3206   }
3207 
3208   ReplaceInstWithInst(
3209       SCEVCheckBlock->getTerminator(),
3210       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
3211   LoopBypassBlocks.push_back(SCEVCheckBlock);
3212   AddedSafetyChecks = true;
3213 }
3214 
3215 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3216   // VPlan-native path does not do any analysis for runtime checks currently.
3217   if (EnableVPlanNativePath)
3218     return;
3219 
3220   // Reuse existing vector loop preheader for runtime memory checks.
3221   // Note that new preheader block is generated for vector loop.
3222   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
3223 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3227   auto *LAI = Legal->getLAI();
3228   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
3229   if (!RtPtrChecking.Need)
3230     return;
3231 
3232   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3233     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3234            "Cannot emit memory checks when optimizing for size, unless forced "
3235            "to vectorize.");
3236     ORE->emit([&]() {
3237       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3238                                         L->getStartLoc(), L->getHeader())
3239              << "Code-size may be reduced by not forcing "
3240                 "vectorization, or by source-code modifications "
3241                 "eliminating the need for runtime checks "
3242                 "(e.g., adding 'restrict').";
3243     });
3244   }
3245 
3246   MemCheckBlock->setName("vector.memcheck");
3247   // Create new preheader for vector loop.
3248   LoopVectorPreHeader =
3249       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
3250                  "vector.ph");
3251 
  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  auto *CondBranch = cast<BranchInst>(
      Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader));
  ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch);
  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;
3263 
3264   Instruction *FirstCheckInst;
3265   Instruction *MemRuntimeCheck;
3266   SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(),
3267                    "induction");
3268   std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks(
3269       MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp);
3270   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
3271                             "claimed checks are required");
3272   CondBranch->setCondition(MemRuntimeCheck);
3273 
3274   // We currently don't use LoopVersioning for the actual loop cloning but we
3275   // still use it to add the noalias metadata.
3276   LVer = std::make_unique<LoopVersioning>(
3277       *Legal->getLAI(),
3278       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3279       DT, PSE.getSE());
3280   LVer->prepareNoAliasMetadata();
3281 }
3282 
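// Compute the value the induction described by ID takes after Index steps:
// StartValue + Index * Step for integer inductions, a GEP of StartValue by
// Index * Step for pointer inductions, and StartValue FAdd/FSub Index * Step
// for floating-point inductions.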
3283 Value *InnerLoopVectorizer::emitTransformedIndex(
3284     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3285     const InductionDescriptor &ID) const {
3286 
3287   SCEVExpander Exp(*SE, DL, "induction");
3288   auto Step = ID.getStep();
3289   auto StartValue = ID.getStartValue();
3290   assert(Index->getType() == Step->getType() &&
3291          "Index type does not match StepValue type");
3292 
3293   // Note: the IR at this point is broken. We cannot use SE to create any new
3294   // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3299   auto CreateAdd = [&B](Value *X, Value *Y) {
3300     assert(X->getType() == Y->getType() && "Types don't match!");
3301     if (auto *CX = dyn_cast<ConstantInt>(X))
3302       if (CX->isZero())
3303         return Y;
3304     if (auto *CY = dyn_cast<ConstantInt>(Y))
3305       if (CY->isZero())
3306         return X;
3307     return B.CreateAdd(X, Y);
3308   };
3309 
3310   auto CreateMul = [&B](Value *X, Value *Y) {
3311     assert(X->getType() == Y->getType() && "Types don't match!");
3312     if (auto *CX = dyn_cast<ConstantInt>(X))
3313       if (CX->isOne())
3314         return Y;
3315     if (auto *CY = dyn_cast<ConstantInt>(Y))
3316       if (CY->isOne())
3317         return X;
3318     return B.CreateMul(X, Y);
3319   };
3320 
3321   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3322   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3323   // the DomTree is not kept up-to-date for additional blocks generated in the
3324   // vector loop. By using the header as insertion point, we guarantee that the
3325   // expanded instructions dominate all their uses.
3326   auto GetInsertPoint = [this, &B]() {
3327     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3328     if (InsertBB != LoopVectorBody &&
3329         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3330       return LoopVectorBody->getTerminator();
3331     return &*B.GetInsertPoint();
3332   };
3333   switch (ID.getKind()) {
3334   case InductionDescriptor::IK_IntInduction: {
3335     assert(Index->getType() == StartValue->getType() &&
3336            "Index type does not match StartValue type");
3337     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3338       return B.CreateSub(StartValue, Index);
3339     auto *Offset = CreateMul(
3340         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3341     return CreateAdd(StartValue, Offset);
3342   }
3343   case InductionDescriptor::IK_PtrInduction: {
3344     assert(isa<SCEVConstant>(Step) &&
3345            "Expected constant step for pointer induction");
3346     return B.CreateGEP(
3347         StartValue->getType()->getPointerElementType(), StartValue,
3348         CreateMul(Index,
3349                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3350   }
3351   case InductionDescriptor::IK_FpInduction: {
3352     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3353     auto InductionBinOp = ID.getInductionBinOp();
3354     assert(InductionBinOp &&
3355            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3356             InductionBinOp->getOpcode() == Instruction::FSub) &&
3357            "Original bin op should be defined for FP induction");
3358 
3359     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3360 
3361     // Floating point operations had to be 'fast' to enable the induction.
3362     FastMathFlags Flags;
3363     Flags.setFast();
3364 
3365     Value *MulExp = B.CreateFMul(StepValue, Index);
3366     if (isa<Instruction>(MulExp))
3367       // We have to check, the MulExp may be a constant.
3368       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
3369 
3370     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3371                                "induction");
3372     if (isa<Instruction>(BOp))
3373       cast<Instruction>(BOp)->setFastMathFlags(Flags);
3374 
3375     return BOp;
3376   }
3377   case InductionDescriptor::IK_NoInduction:
3378     return nullptr;
3379   }
3380   llvm_unreachable("invalid enum");
3381 }
3382 
3383 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3384   LoopScalarBody = OrigLoop->getHeader();
3385   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3386   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3387   assert(LoopExitBlock && "Must have an exit block");
3388   assert(LoopVectorPreHeader && "Invalid loop structure");
3389 
3390   LoopMiddleBlock =
3391       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3392                  LI, nullptr, Twine(Prefix) + "middle.block");
3393   LoopScalarPreHeader =
3394       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3395                  nullptr, Twine(Prefix) + "scalar.ph");
3396 
3397   // Set up branch from middle block to the exit and scalar preheader blocks.
3398   // completeLoopSkeleton will update the condition to use an iteration check,
3399   // if required to decide whether to execute the remainder.
3400   BranchInst *BrInst =
3401       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3402   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3403   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3404   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3405 
  // We intentionally don't let SplitBlock update LoopInfo since LoopVectorBody
  // should belong to a different loop than LoopVectorPreHeader. LoopVectorBody
  // is explicitly added to the correct place a few lines later.
3409   LoopVectorBody =
3410       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3411                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3412 
3413   // Update dominator for loop exit.
3414   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3415 
3416   // Create and register the new vector loop.
3417   Loop *Lp = LI->AllocateLoop();
3418   Loop *ParentLoop = OrigLoop->getParentLoop();
3419 
3420   // Insert the new loop into the loop nest and register the new basic blocks
3421   // before calling any utilities such as SCEV that require valid LoopInfo.
3422   if (ParentLoop) {
3423     ParentLoop->addChildLoop(Lp);
3424   } else {
3425     LI->addTopLevelLoop(Lp);
3426   }
3427   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3428   return Lp;
3429 }
3430 
3431 void InnerLoopVectorizer::createInductionResumeValues(
3432     Loop *L, Value *VectorTripCount,
3433     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3434   assert(VectorTripCount && L && "Expected valid arguments");
3435   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3436           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3437          "Inconsistent information about additional bypass.");
3438   // We are going to resume the execution of the scalar loop.
3439   // Go over all of the induction variables that we found and fix the
3440   // PHIs that are left in the scalar version of the loop.
3441   // The starting values of PHI nodes depend on the counter of the last
3442   // iteration in the vectorized loop.
3443   // If we come from a bypass edge then we need to start from the original
3444   // start value.
3445   for (auto &InductionEntry : Legal->getInductionVars()) {
3446     PHINode *OrigPhi = InductionEntry.first;
3447     InductionDescriptor II = InductionEntry.second;
3448 
    // Create phi nodes to merge from the backedge-taken check block.
3450     PHINode *BCResumeVal =
3451         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3452                         LoopScalarPreHeader->getTerminator());
3453     // Copy original phi DL over to the new one.
3454     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3455     Value *&EndValue = IVEndValues[OrigPhi];
3456     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3457     if (OrigPhi == OldInduction) {
3458       // We know what the end value is.
3459       EndValue = VectorTripCount;
3460     } else {
3461       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3462       Type *StepType = II.getStep()->getType();
3463       Instruction::CastOps CastOp =
3464           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3465       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3466       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3467       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3468       EndValue->setName("ind.end");
3469 
3470       // Compute the end value for the additional bypass (if applicable).
3471       if (AdditionalBypass.first) {
3472         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3473         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3474                                          StepType, true);
3475         CRD =
3476             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3477         EndValueFromAdditionalBypass =
3478             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3479         EndValueFromAdditionalBypass->setName("ind.end");
3480       }
3481     }
3482     // The new PHI merges the original incoming value, in case of a bypass,
3483     // or the value at the end of the vectorized loop.
3484     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3485 
3486     // Fix the scalar body counter (PHI node).
3487     // The old induction's phi node in the scalar body needs the truncated
3488     // value.
3489     for (BasicBlock *BB : LoopBypassBlocks)
3490       BCResumeVal->addIncoming(II.getStartValue(), BB);
3491 
3492     if (AdditionalBypass.first)
3493       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3494                                             EndValueFromAdditionalBypass);
3495 
3496     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3497   }
3498 }
3499 
3500 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3501                                                       MDNode *OrigLoopID) {
3502   assert(L && "Expected valid loop.");
3503 
3504   // The trip counts should be cached by now.
3505   Value *Count = getOrCreateTripCount(L);
3506   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3507 
3508   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3509 
3510   // Add a check in the middle block to see if we have completed
3511   // all of the iterations in the first vector loop.
3512   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3513   // If tail is to be folded, we know we don't need to run the remainder.
3514   if (!Cost->foldTailByMasking()) {
3515     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3516                                         Count, VectorTripCount, "cmp.n",
3517                                         LoopMiddleBlock->getTerminator());
3518 
3519     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3520     // of the corresponding compare because they may have ended up with
3521     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare has a line number inside the loop.
3523     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3524     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3525   }
3526 
3527   // Get ready to start creating new instructions into the vectorized body.
3528   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3529          "Inconsistent vector loop preheader");
3530   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3531 
3532   Optional<MDNode *> VectorizedLoopID =
3533       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3534                                       LLVMLoopVectorizeFollowupVectorized});
3535   if (VectorizedLoopID.hasValue()) {
3536     L->setLoopID(VectorizedLoopID.getValue());
3537 
3538     // Do not setAlreadyVectorized if loop attributes have been defined
3539     // explicitly.
3540     return LoopVectorPreHeader;
3541   }
3542 
3543   // Keep all loop hints from the original loop on the vector loop (we'll
3544   // replace the vectorizer-specific hints below).
3545   if (MDNode *LID = OrigLoop->getLoopID())
3546     L->setLoopID(LID);
3547 
3548   LoopVectorizeHints Hints(L, true, *ORE);
3549   Hints.setAlreadyVectorized();
3550 
3551 #ifdef EXPENSIVE_CHECKS
3552   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3553   LI->verify(*DT);
3554 #endif
3555 
3556   return LoopVectorPreHeader;
3557 }
3558 
3559 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3560   /*
3561    In this function we generate a new loop. The new loop will contain
3562    the vectorized instructions while the old loop will continue to run the
3563    scalar remainder.
3564 
3565        [ ] <-- loop iteration number check.
3566     /   |
3567    /    v
3568   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3569   |  /  |
3570   | /   v
3571   ||   [ ]     <-- vector pre header.
3572   |/    |
3573   |     v
3574   |    [  ] \
3575   |    [  ]_|   <-- vector loop.
3576   |     |
3577   |     v
3578   |   -[ ]   <--- middle-block.
3579   |  /  |
3580   | /   v
3581   -|- >[ ]     <--- new preheader.
3582    |    |
3583    |    v
3584    |   [ ] \
3585    |   [ ]_|   <-- old scalar loop to handle remainder.
3586     \   |
3587      \  v
3588       >[ ]     <-- exit block.
3589    ...
3590    */
3591 
3592   // Get the metadata of the original loop before it gets modified.
3593   MDNode *OrigLoopID = OrigLoop->getLoopID();
3594 
3595   // Create an empty vector loop, and prepare basic blocks for the runtime
3596   // checks.
3597   Loop *Lp = createVectorLoopSkeleton("");
3598 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
3604   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3605 
3606   // Generate the code to check any assumptions that we've made for SCEV
3607   // expressions.
3608   emitSCEVChecks(Lp, LoopScalarPreHeader);
3609 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3613   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3614 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is loops written with C++ iterators, which often have
  // multiple pointer induction variables. In the code below we also support
  // the case where there is no single induction variable.
3619   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
3622   //   - is an integer
3623   //   - counts from zero, stepping by one
3624   //   - is the size of the widest induction variable type
3625   // then we create a new one.
3626   OldInduction = Legal->getPrimaryInduction();
3627   Type *IdxTy = Legal->getWidestInductionType();
3628   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3629   // The loop step is equal to the vectorization factor (num of SIMD elements)
3630   // times the unroll factor (num of SIMD instructions).
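  // For example (illustrative), with VF = 4 and UF = 2 the step is 8, so the
  // induction advances by eight scalar iterations per vector-loop iteration.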
3631   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3632   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3633   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3634   Induction =
3635       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3636                               getDebugLocFromInstOrOperands(OldInduction));
3637 
3638   // Emit phis for the new starting index of the scalar loop.
3639   createInductionResumeValues(Lp, CountRoundDown);
3640 
3641   return completeLoopSkeleton(Lp, OrigLoopID);
3642 }
3643 
3644 // Fix up external users of the induction variable. At this point, we are
3645 // in LCSSA form, with all external PHIs that use the IV having one input value,
3646 // coming from the remainder loop. We need those PHIs to also have a correct
3647 // value for the IV when arriving directly from the middle block.
3648 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3649                                        const InductionDescriptor &II,
3650                                        Value *CountRoundDown, Value *EndValue,
3651                                        BasicBlock *MiddleBlock) {
3652   // There are two kinds of external IV usages - those that use the value
3653   // computed in the last iteration (the PHI) and those that use the penultimate
3654   // value (the value that feeds into the phi from the loop latch).
3655   // We allow both, but they, obviously, have different values.
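  // For example (illustrative), code after
  //   for (i = 0; i < n; ++i) ...
  // may read either the final value of the phi 'i' or the incremented value
  // feeding back from the latch; both escape through LCSSA phis in the exit
  // block and need an incoming value for the middle block.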
3656 
3657   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3658 
3659   DenseMap<Value *, Value *> MissingVals;
3660 
3661   // An external user of the last iteration's value should see the value that
3662   // the remainder loop uses to initialize its own IV.
3663   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3664   for (User *U : PostInc->users()) {
3665     Instruction *UI = cast<Instruction>(U);
3666     if (!OrigLoop->contains(UI)) {
3667       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3668       MissingVals[UI] = EndValue;
3669     }
3670   }
3671 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is, Start + (Step * (CRD - 1)).
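  // For example (illustrative): for an IV starting at 0 with step 1 and
  // vector trip count CRD, the escaping phi value is 0 + 1 * (CRD - 1), the
  // last index actually executed by the vector loop.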
3675   for (User *U : OrigPhi->users()) {
3676     auto *UI = cast<Instruction>(U);
3677     if (!OrigLoop->contains(UI)) {
3678       const DataLayout &DL =
3679           OrigLoop->getHeader()->getModule()->getDataLayout();
3680       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3681 
3682       IRBuilder<> B(MiddleBlock->getTerminator());
3683       Value *CountMinusOne = B.CreateSub(
3684           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3685       Value *CMO =
3686           !II.getStep()->getType()->isIntegerTy()
3687               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3688                              II.getStep()->getType())
3689               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3690       CMO->setName("cast.cmo");
3691       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3692       Escape->setName("ind.escape");
3693       MissingVals[UI] = Escape;
3694     }
3695   }
3696 
3697   for (auto &I : MissingVals) {
3698     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is, %IV2 = phi [...], [ %IV1, %latch ]
3701     // In this case, if IV1 has an external use, we need to avoid adding both
3702     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3703     // don't already have an incoming value for the middle block.
3704     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3705       PHI->addIncoming(I.second, MiddleBlock);
3706   }
3707 }
3708 
3709 namespace {
3710 
3711 struct CSEDenseMapInfo {
3712   static bool canHandle(const Instruction *I) {
3713     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3714            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3715   }
3716 
3717   static inline Instruction *getEmptyKey() {
3718     return DenseMapInfo<Instruction *>::getEmptyKey();
3719   }
3720 
3721   static inline Instruction *getTombstoneKey() {
3722     return DenseMapInfo<Instruction *>::getTombstoneKey();
3723   }
3724 
3725   static unsigned getHashValue(const Instruction *I) {
3726     assert(canHandle(I) && "Unknown instruction!");
3727     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3728                                                            I->value_op_end()));
3729   }
3730 
3731   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3732     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3733         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3734       return LHS == RHS;
3735     return LHS->isIdenticalTo(RHS);
3736   }
3737 };
3738 
3739 } // end anonymous namespace
3740 
/// Perform common subexpression elimination (CSE) on induction variable
/// instructions.
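/// For example (illustrative), two identical extractelement instructions
/// created while unrolling induction updates hash to the same key; the later
/// one is replaced by the earlier one and erased.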
3742 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3744   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3745   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3746     Instruction *In = &*I++;
3747 
3748     if (!CSEDenseMapInfo::canHandle(In))
3749       continue;
3750 
3751     // Check if we can replace this instruction with any of the
3752     // visited instructions.
3753     if (Instruction *V = CSEMap.lookup(In)) {
3754       In->replaceAllUsesWith(V);
3755       In->eraseFromParent();
3756       continue;
3757     }
3758 
3759     CSEMap[In] = In;
3760   }
3761 }
3762 
3763 InstructionCost
3764 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3765                                               bool &NeedToScalarize) {
3766   Function *F = CI->getCalledFunction();
3767   Type *ScalarRetTy = CI->getType();
3768   SmallVector<Type *, 4> Tys, ScalarTys;
3769   for (auto &ArgOp : CI->arg_operands())
3770     ScalarTys.push_back(ArgOp->getType());
3771 
3772   // Estimate cost of scalarized vector call. The source operands are assumed
3773   // to be vectors, so we need to extract individual elements from there,
3774   // execute VF scalar calls, and then gather the result into the vector return
3775   // value.
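  // For example (illustrative), with VF = 4 the scalarized form is modelled
  // as four scalar calls plus the extract/insert overhead computed below,
  // i.e. roughly ScalarCallCost * 4 + ScalarizationCost.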
3776   InstructionCost ScalarCallCost =
3777       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3778   if (VF.isScalar())
3779     return ScalarCallCost;
3780 
3781   // Compute corresponding vector type for return value and arguments.
3782   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3783   for (Type *ScalarTy : ScalarTys)
3784     Tys.push_back(ToVectorTy(ScalarTy, VF));
3785 
3786   // Compute costs of unpacking argument values for the scalar calls and
3787   // packing the return values to a vector.
3788   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3789 
3790   InstructionCost Cost =
3791       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3792 
3793   // If we can't emit a vector call for this function, then the currently found
3794   // cost is the cost we need to return.
3795   NeedToScalarize = true;
3796   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3797   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3798 
3799   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3800     return Cost;
3801 
3802   // If the corresponding vector cost is cheaper, return its cost.
3803   InstructionCost VectorCallCost =
3804       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3805   if (VectorCallCost < Cost) {
3806     NeedToScalarize = false;
3807     Cost = VectorCallCost;
3808   }
3809   return Cost;
3810 }
3811 
3812 InstructionCost
3813 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3814                                                    ElementCount VF) {
3815   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3816   assert(ID && "Expected intrinsic call!");
3817 
3818   IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3819   return TTI.getIntrinsicInstrCost(CostAttrs,
3820                                    TargetTransformInfo::TCK_RecipThroughput);
3821 }
3822 
3823 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3824   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3825   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3826   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3827 }
3828 
3829 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3830   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3831   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3832   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3833 }
3834 
3835 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I`, and re-extend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
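  // For example (illustrative), an i32 add known to need only 8 bits becomes:
  // truncate the operands to <VF x i8>, add in i8, then zext the result back
  // to <VF x i32> for its existing users.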
3839   SmallPtrSet<Value *, 4> Erased;
3840   for (const auto &KV : Cost->getMinimalBitwidths()) {
3841     // If the value wasn't vectorized, we must maintain the original scalar
3842     // type. The absence of the value from VectorLoopValueMap indicates that it
3843     // wasn't vectorized.
3844     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3845       continue;
3846     for (unsigned Part = 0; Part < UF; ++Part) {
3847       Value *I = getOrCreateVectorValue(KV.first, Part);
3848       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3849         continue;
3850       Type *OriginalTy = I->getType();
3851       Type *ScalarTruncatedTy =
3852           IntegerType::get(OriginalTy->getContext(), KV.second);
3853       auto *TruncatedTy = FixedVectorType::get(
3854           ScalarTruncatedTy,
3855           cast<FixedVectorType>(OriginalTy)->getNumElements());
3856       if (TruncatedTy == OriginalTy)
3857         continue;
3858 
3859       IRBuilder<> B(cast<Instruction>(I));
3860       auto ShrinkOperand = [&](Value *V) -> Value * {
3861         if (auto *ZI = dyn_cast<ZExtInst>(V))
3862           if (ZI->getSrcTy() == TruncatedTy)
3863             return ZI->getOperand(0);
3864         return B.CreateZExtOrTrunc(V, TruncatedTy);
3865       };
3866 
3867       // The actual instruction modification depends on the instruction type,
3868       // unfortunately.
3869       Value *NewI = nullptr;
3870       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3871         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3872                              ShrinkOperand(BO->getOperand(1)));
3873 
3874         // Any wrapping introduced by shrinking this operation shouldn't be
3875         // considered undefined behavior. So, we can't unconditionally copy
3876         // arithmetic wrapping flags to NewI.
3877         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3878       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3879         NewI =
3880             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3881                          ShrinkOperand(CI->getOperand(1)));
3882       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3883         NewI = B.CreateSelect(SI->getCondition(),
3884                               ShrinkOperand(SI->getTrueValue()),
3885                               ShrinkOperand(SI->getFalseValue()));
3886       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3887         switch (CI->getOpcode()) {
3888         default:
3889           llvm_unreachable("Unhandled cast!");
3890         case Instruction::Trunc:
3891           NewI = ShrinkOperand(CI->getOperand(0));
3892           break;
3893         case Instruction::SExt:
3894           NewI = B.CreateSExtOrTrunc(
3895               CI->getOperand(0),
3896               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3897           break;
3898         case Instruction::ZExt:
3899           NewI = B.CreateZExtOrTrunc(
3900               CI->getOperand(0),
3901               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3902           break;
3903         }
3904       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3905         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3906                              ->getNumElements();
3907         auto *O0 = B.CreateZExtOrTrunc(
3908             SI->getOperand(0),
3909             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3910         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3911                              ->getNumElements();
3912         auto *O1 = B.CreateZExtOrTrunc(
3913             SI->getOperand(1),
3914             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3915 
3916         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3917       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3918         // Don't do anything with the operands, just extend the result.
3919         continue;
3920       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3921         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3922                             ->getNumElements();
3923         auto *O0 = B.CreateZExtOrTrunc(
3924             IE->getOperand(0),
3925             FixedVectorType::get(ScalarTruncatedTy, Elements));
3926         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3927         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3928       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3929         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3930                             ->getNumElements();
3931         auto *O0 = B.CreateZExtOrTrunc(
3932             EE->getOperand(0),
3933             FixedVectorType::get(ScalarTruncatedTy, Elements));
3934         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3935       } else {
3936         // If we don't know what to do, be conservative and don't do anything.
3937         continue;
3938       }
3939 
3940       // Lastly, extend the result.
3941       NewI->takeName(cast<Instruction>(I));
3942       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3943       I->replaceAllUsesWith(Res);
3944       cast<Instruction>(I)->eraseFromParent();
3945       Erased.insert(I);
3946       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3947     }
3948   }
3949 
  // We'll have created a bunch of ZExts that are now unused. Clean up.
3951   for (const auto &KV : Cost->getMinimalBitwidths()) {
3952     // If the value wasn't vectorized, we must maintain the original scalar
3953     // type. The absence of the value from VectorLoopValueMap indicates that it
3954     // wasn't vectorized.
3955     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3956       continue;
3957     for (unsigned Part = 0; Part < UF; ++Part) {
3958       Value *I = getOrCreateVectorValue(KV.first, Part);
3959       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3960       if (Inst && Inst->use_empty()) {
3961         Value *NewI = Inst->getOperand(0);
3962         Inst->eraseFromParent();
3963         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3964       }
3965     }
3966   }
3967 }
3968 
3969 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3970   // Insert truncates and extends for any truncated instructions as hints to
3971   // InstCombine.
3972   if (VF.isVector())
3973     truncateToMinimalBitwidths();
3974 
3975   // Fix widened non-induction PHIs by setting up the PHI operands.
3976   if (OrigPHIsToFix.size()) {
3977     assert(EnableVPlanNativePath &&
3978            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3979     fixNonInductionPHIs(State);
3980   }
3981 
3982   // At this point every instruction in the original loop is widened to a
3983   // vector form. Now we need to fix the recurrences in the loop. These PHI
3984   // nodes are currently empty because we did not want to introduce cycles.
3985   // This is the second stage of vectorizing recurrences.
3986   fixCrossIterationPHIs(State);
3987 
3988   // Forget the original basic block.
3989   PSE.getSE()->forgetLoop(OrigLoop);
3990 
3991   // Fix-up external users of the induction variables.
3992   for (auto &Entry : Legal->getInductionVars())
3993     fixupIVUsers(Entry.first, Entry.second,
3994                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3995                  IVEndValues[Entry.first], LoopMiddleBlock);
3996 
3997   fixLCSSAPHIs(State);
3998   for (Instruction *PI : PredicatedInstructions)
3999     sinkScalarOperands(&*PI);
4000 
4001   // Remove redundant induction instructions.
4002   cse(LoopVectorBody);
4003 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
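  // For example (illustrative), if the profile says the original loop ran
  // about 1000 iterations and VF * UF = 8, the vector loop is credited with
  // roughly 1000 / 8 = 125 iterations and the scalar remainder loop with the
  // few iterations left over.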
4017   setProfileInfoAfterUnrolling(
4018       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4019       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4020 }
4021 
4022 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4023   // In order to support recurrences we need to be able to vectorize Phi nodes.
4024   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4025   // stage #2: We now need to fix the recurrences by adding incoming edges to
4026   // the currently empty PHI nodes. At this point every instruction in the
4027   // original loop is widened to a vector form so we can use them to construct
4028   // the incoming edges.
4029   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4030     // Handle first-order recurrences and reductions that need to be fixed.
4031     if (Legal->isFirstOrderRecurrence(&Phi))
4032       fixFirstOrderRecurrence(&Phi, State);
4033     else if (Legal->isReductionVariable(&Phi))
4034       fixReduction(&Phi, State);
4035   }
4036 }
4037 
4038 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4039                                                   VPTransformState &State) {
4040   // This is the second phase of vectorizing first-order recurrences. An
4041   // overview of the transformation is described below. Suppose we have the
4042   // following loop.
4043   //
4044   //   for (int i = 0; i < n; ++i)
4045   //     b[i] = a[i] - a[i - 1];
4046   //
4047   // There is a first-order recurrence on "a". For this loop, the shorthand
4048   // scalar IR looks like:
4049   //
4050   //   scalar.ph:
4051   //     s_init = a[-1]
4052   //     br scalar.body
4053   //
4054   //   scalar.body:
4055   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4056   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4057   //     s2 = a[i]
4058   //     b[i] = s2 - s1
4059   //     br cond, scalar.body, ...
4060   //
  // In this example, s1 is a recurrence because its value depends on the
4062   // previous iteration. In the first phase of vectorization, we created a
4063   // temporary value for s1. We now complete the vectorization and produce the
4064   // shorthand vector IR shown below (for VF = 4, UF = 1).
4065   //
4066   //   vector.ph:
4067   //     v_init = vector(..., ..., ..., a[-1])
4068   //     br vector.body
4069   //
4070   //   vector.body
4071   //     i = phi [0, vector.ph], [i+4, vector.body]
4072   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4073   //     v2 = a[i, i+1, i+2, i+3];
4074   //     v3 = vector(v1(3), v2(0, 1, 2))
4075   //     b[i, i+1, i+2, i+3] = v2 - v3
4076   //     br cond, vector.body, middle.block
4077   //
4078   //   middle.block:
4079   //     x = v2(3)
4080   //     br scalar.ph
4081   //
4082   //   scalar.ph:
4083   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4084   //     br scalar.body
4085   //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
4088 
4089   // Get the original loop preheader and single loop latch.
4090   auto *Preheader = OrigLoop->getLoopPreheader();
4091   auto *Latch = OrigLoop->getLoopLatch();
4092 
4093   // Get the initial and previous values of the scalar recurrence.
4094   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4095   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4096 
4097   // Create a vector from the initial value.
4098   auto *VectorInit = ScalarInit;
4099   if (VF.isVector()) {
4100     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4101     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4102     VectorInit = Builder.CreateInsertElement(
4103         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4104         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4105   }
4106 
4107   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4108   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4109   // We constructed a temporary phi node in the first phase of vectorization.
4110   // This phi node will eventually be deleted.
4111   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4112 
4113   // Create a phi node for the new recurrence. The current value will either be
4114   // the initial value inserted into a vector or loop-varying vector value.
4115   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4116   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4117 
4118   // Get the vectorized previous value of the last part UF - 1. It appears last
4119   // among all unrolled iterations, due to the order of their construction.
4120   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4121 
4122   // Find and set the insertion point after the previous value if it is an
4123   // instruction.
4124   BasicBlock::iterator InsertPt;
4125   // Note that the previous value may have been constant-folded so it is not
4126   // guaranteed to be an instruction in the vector loop.
4127   // FIXME: Loop invariant values do not form recurrences. We should deal with
4128   //        them earlier.
4129   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4130     InsertPt = LoopVectorBody->getFirstInsertionPt();
4131   else {
4132     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4133     if (isa<PHINode>(PreviousLastPart))
4134       // If the previous value is a phi node, we should insert after all the phi
4135       // nodes in the block containing the PHI to avoid breaking basic block
4136       // verification. Note that the basic block may be different to
4137       // LoopVectorBody, in case we predicate the loop.
4138       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4139     else
4140       InsertPt = ++PreviousInst->getIterator();
4141   }
4142   Builder.SetInsertPoint(&*InsertPt);
4143 
4144   // We will construct a vector for the recurrence by combining the values for
4145   // the current and previous iterations. This is the required shuffle mask.
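  // For example (illustrative), with VF = 4 the mask is <3, 4, 5, 6>: the
  // last element of the incoming (phi) vector followed by the first three
  // elements of the previous value, matching v3 = vector(v1(3), v2(0, 1, 2))
  // in the example above.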
4146   assert(!VF.isScalable());
4147   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
4148   ShuffleMask[0] = VF.getKnownMinValue() - 1;
4149   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
4150     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
4151 
4152   // The vector from which to take the initial value for the current iteration
4153   // (actual or unrolled). Initially, this is the vector phi node.
4154   Value *Incoming = VecPhi;
4155 
4156   // Shuffle the current and previous vector and update the vector parts.
4157   for (unsigned Part = 0; Part < UF; ++Part) {
4158     Value *PreviousPart = State.get(PreviousDef, Part);
4159     Value *PhiPart = State.get(PhiDef, Part);
4160     auto *Shuffle =
4161         VF.isVector()
4162             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4163             : Incoming;
4164     PhiPart->replaceAllUsesWith(Shuffle);
4165     cast<Instruction>(PhiPart)->eraseFromParent();
4166     State.reset(PhiDef, Phi, Shuffle, Part);
4167     Incoming = PreviousPart;
4168   }
4169 
4170   // Fix the latch value of the new recurrence in the vector loop.
4171   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4172 
4173   // Extract the last vector element in the middle block. This will be the
4174   // initial value for the recurrence when jumping to the scalar loop.
4175   auto *ExtractForScalar = Incoming;
4176   if (VF.isVector()) {
4177     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4178     ExtractForScalar = Builder.CreateExtractElement(
4179         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4180         "vector.recur.extract");
4181   }
  // Extract the second-to-last element in the middle block if the Phi is used
  // outside the loop. We need to extract the phi itself and not the last
  // element (the phi update in the current iteration). This is the value used
  // when jumping from LoopMiddleBlock directly to the exit block, i.e., when
  // the scalar loop is not run at all.
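  // For example (illustrative), continuing the VF = 4 example above, this
  // extracts v2(2), the value the scalar phi held during the final vector
  // iteration, whereas v2(3) is the value used to seed the scalar loop.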
4187   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4188   if (VF.isVector())
4189     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4190         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4191         "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
4196   else if (UF > 1)
4197     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4198 
4199   // Fix the initial value of the original recurrence in the scalar loop.
4200   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4201   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4202   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4203     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4204     Start->addIncoming(Incoming, BB);
4205   }
4206 
4207   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4208   Phi->setName("scalar.recur");
4209 
4210   // Finally, fix users of the recurrence outside the loop. The users will need
4211   // either the last value of the scalar recurrence or the last value of the
4212   // vector recurrence we extracted in the middle block. Since the loop is in
4213   // LCSSA form, we just need to find all the phi nodes for the original scalar
4214   // recurrence in the exit block, and then add an edge for the middle block.
4215   // Note that LCSSA does not imply single entry when the original scalar loop
4216   // had multiple exiting edges (as we always run the last iteration in the
4217   // scalar epilogue); in that case, the exiting path through middle will be
4218   // dynamically dead and the value picked for the phi doesn't matter.
4219   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4220     if (any_of(LCSSAPhi.incoming_values(),
4221                [Phi](Value *V) { return V == Phi; }))
4222       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4223 }
4224 
4225 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
  // Get its reduction variable descriptor.
4227   assert(Legal->isReductionVariable(Phi) &&
4228          "Unable to find the reduction variable");
4229   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4230 
4231   RecurKind RK = RdxDesc.getRecurrenceKind();
4232   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4233   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4234   setDebugLocFromInst(Builder, ReductionStartValue);
4235   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4236 
4237   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4238   // This is the vector-clone of the value that leaves the loop.
4239   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4240 
4241   // Wrap flags are in general invalid after vectorization, clear them.
4242   clearReductionWrapFlags(RdxDesc);
4243 
4244   // Fix the vector-loop phi.
4245 
4246   // Reductions do not have to start at zero. They can start with
4247   // any loop invariant values.
4248   BasicBlock *Latch = OrigLoop->getLoopLatch();
4249   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4250 
4251   for (unsigned Part = 0; Part < UF; ++Part) {
4252     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4253     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4254     cast<PHINode>(VecRdxPhi)
4255       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4256   }
4257 
4258   // Before each round, move the insertion point right between
4259   // the PHIs and the values we are going to write.
4260   // This allows us to write both PHINodes and the extractelement
4261   // instructions.
4262   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4263 
4264   setDebugLocFromInst(Builder, LoopExitInst);
4265 
4266   // If tail is folded by masking, the vector value to leave the loop should be
4267   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4268   // instead of the former. For an inloop reduction the reduction will already
4269   // be predicated, and does not need to be handled here.
4270   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4271     for (unsigned Part = 0; Part < UF; ++Part) {
4272       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4273       Value *Sel = nullptr;
4274       for (User *U : VecLoopExitInst->users()) {
4275         if (isa<SelectInst>(U)) {
4276           assert(!Sel && "Reduction exit feeding two selects");
4277           Sel = U;
4278         } else
4279           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4280       }
4281       assert(Sel && "Reduction exit feeds no select");
4282       State.reset(LoopExitInstDef, LoopExitInst, Sel, Part);
4283 
4284       // If the target can create a predicated operator for the reduction at no
4285       // extra cost in the loop (for example a predicated vadd), it can be
4286       // cheaper for the select to remain in the loop than be sunk out of it,
4287       // and so use the select value for the phi instead of the old
4288       // LoopExitValue.
4289       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4290       if (PreferPredicatedReductionSelect ||
4291           TTI->preferPredicatedReductionSelect(
4292               RdxDesc.getOpcode(), Phi->getType(),
4293               TargetTransformInfo::ReductionFlags())) {
4294         auto *VecRdxPhi =
4295             cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
4296         VecRdxPhi->setIncomingValueForBlock(
4297             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4298       }
4299     }
4300   }
4301 
4302   // If the vector reduction can be performed in a smaller type, we truncate
4303   // then extend the loop exit value to enable InstCombine to evaluate the
4304   // entire expression in the smaller type.
4305   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4306     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4307     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4308     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4309     Builder.SetInsertPoint(
4310         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4311     VectorParts RdxParts(UF);
4312     for (unsigned Part = 0; Part < UF; ++Part) {
4313       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4314       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4315       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4316                                         : Builder.CreateZExt(Trunc, VecTy);
4317       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4318            UI != RdxParts[Part]->user_end();)
4319         if (*UI != Trunc) {
4320           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4321           RdxParts[Part] = Extnd;
4322         } else {
4323           ++UI;
4324         }
4325     }
4326     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4327     for (unsigned Part = 0; Part < UF; ++Part) {
4328       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4329       State.reset(LoopExitInstDef, LoopExitInst, RdxParts[Part], Part);
4330     }
4331   }
4332 
4333   // Reduce all of the unrolled parts into a single vector.
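  // For example (illustrative), with UF = 2 and an integer add reduction the
  // two part vectors are combined with a single vector add ("bin.rdx") here,
  // before the final horizontal reduction below.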
4334   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4335   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4336 
4337   // The middle block terminator has already been assigned a DebugLoc here (the
4338   // OrigLoop's single latch terminator). We want the whole middle block to
4339   // appear to execute on this line because: (a) it is all compiler generated,
4340   // (b) these instructions are always executed after evaluating the latch
4341   // conditional branch, and (c) other passes may add new predecessors which
4342   // terminate on this line. This is the easiest way to ensure we don't
4343   // accidentally cause an extra step back into the loop while debugging.
4344   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4345   {
4346     // Floating-point operations should have some FMF to enable the reduction.
4347     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4348     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4349     for (unsigned Part = 1; Part < UF; ++Part) {
4350       Value *RdxPart = State.get(LoopExitInstDef, Part);
4351       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4352         ReducedPartRdx = Builder.CreateBinOp(
4353             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4354       } else {
4355         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4356       }
4357     }
4358   }
4359 
  // Create the reduction after the loop. Note that in-loop reductions create
  // the target reduction in the loop using a Reduction recipe.
4362   if (VF.isVector() && !IsInLoopReductionPhi) {
4363     ReducedPartRdx =
4364         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4365     // If the reduction can be performed in a smaller type, we need to extend
4366     // the reduction to the wider type before we branch to the original loop.
4367     if (Phi->getType() != RdxDesc.getRecurrenceType())
4368       ReducedPartRdx =
4369         RdxDesc.isSigned()
4370         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4371         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4372   }
4373 
4374   // Create a phi node that merges control-flow from the backedge-taken check
4375   // block and the middle block.
4376   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4377                                         LoopScalarPreHeader->getTerminator());
4378   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4379     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4380   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4381 
4382   // Now, we need to fix the users of the reduction variable
4383   // inside and outside of the scalar remainder loop.
4384 
4385   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4386   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4388   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4389     if (any_of(LCSSAPhi.incoming_values(),
4390                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4391       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4392 
4393   // Fix the scalar loop reduction variable with the incoming reduction sum
4394   // from the vector body and from the backedge value.
4395   int IncomingEdgeBlockIdx =
4396     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4397   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4398   // Pick the other block.
4399   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4400   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4401   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4402 }
4403 
4404 void InnerLoopVectorizer::clearReductionWrapFlags(
4405     RecurrenceDescriptor &RdxDesc) {
4406   RecurKind RK = RdxDesc.getRecurrenceKind();
4407   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4408     return;
4409 
4410   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4411   assert(LoopExitInstr && "null loop exit instruction");
4412   SmallVector<Instruction *, 8> Worklist;
4413   SmallPtrSet<Instruction *, 8> Visited;
4414   Worklist.push_back(LoopExitInstr);
4415   Visited.insert(LoopExitInstr);
4416 
4417   while (!Worklist.empty()) {
4418     Instruction *Cur = Worklist.pop_back_val();
4419     if (isa<OverflowingBinaryOperator>(Cur))
4420       for (unsigned Part = 0; Part < UF; ++Part) {
4421         Value *V = getOrCreateVectorValue(Cur, Part);
4422         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4423       }
4424 
4425     for (User *U : Cur->users()) {
4426       Instruction *UI = cast<Instruction>(U);
4427       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4428           Visited.insert(UI).second)
4429         Worklist.push_back(UI);
4430     }
4431   }
4432 }
4433 
4434 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4435   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4436     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4437       // Some phis were already hand updated by the reduction and recurrence
4438       // code above, leave them alone.
4439       continue;
4440 
4441     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4442     // Non-instruction incoming values will have only one value.
4443     unsigned LastLane = 0;
4444     if (isa<Instruction>(IncomingValue))
4445       LastLane = Cost->isUniformAfterVectorization(
4446                      cast<Instruction>(IncomingValue), VF)
4447                      ? 0
4448                      : VF.getKnownMinValue() - 1;
4449     assert((!VF.isScalable() || LastLane == 0) &&
4450            "scalable vectors dont support non-uniform scalars yet");
4451     // Can be a loop invariant incoming value or the last scalar value to be
4452     // extracted from the vectorized loop.
4453     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4454     Value *lastIncomingValue =
4455         OrigLoop->isLoopInvariant(IncomingValue)
4456             ? IncomingValue
4457             : State.get(State.Plan->getVPValue(IncomingValue),
4458                         VPIteration(UF - 1, LastLane));
4459     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4460   }
4461 }
4462 
4463 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4464   // The basic block and loop containing the predicated instruction.
4465   auto *PredBB = PredInst->getParent();
4466   auto *VectorLoop = LI->getLoopFor(PredBB);
4467 
4468   // Initialize a worklist with the operands of the predicated instruction.
4469   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4470 
4471   // Holds instructions that we need to analyze again. An instruction may be
4472   // reanalyzed if we don't yet know if we can sink it or not.
4473   SmallVector<Instruction *, 8> InstsToReanalyze;
4474 
4475   // Returns true if a given use occurs in the predicated block. Phi nodes use
4476   // their operands in their corresponding predecessor blocks.
4477   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4478     auto *I = cast<Instruction>(U.getUser());
4479     BasicBlock *BB = I->getParent();
4480     if (auto *Phi = dyn_cast<PHINode>(I))
4481       BB = Phi->getIncomingBlock(
4482           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4483     return BB == PredBB;
4484   };
4485 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist does not sink a single instruction.
4490   bool Changed;
4491   do {
4492     // Add the instructions that need to be reanalyzed to the worklist, and
4493     // reset the changed indicator.
4494     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4495     InstsToReanalyze.clear();
4496     Changed = false;
4497 
4498     while (!Worklist.empty()) {
4499       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4500 
4501       // We can't sink an instruction if it is a phi node, is already in the
4502       // predicated block, is not in the loop, or may have side effects.
4503       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4504           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4505         continue;
4506 
4507       // It's legal to sink the instruction if all its uses occur in the
4508       // predicated block. Otherwise, there's nothing to do yet, and we may
4509       // need to reanalyze the instruction.
4510       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4511         InstsToReanalyze.push_back(I);
4512         continue;
4513       }
4514 
4515       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4517       I->moveBefore(&*PredBB->getFirstInsertionPt());
4518       Worklist.insert(I->op_begin(), I->op_end());
4519 
4520       // The sinking may have enabled other instructions to be sunk, so we will
4521       // need to iterate.
4522       Changed = true;
4523     }
4524   } while (Changed);
4525 }
4526 
4527 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4528   for (PHINode *OrigPhi : OrigPHIsToFix) {
4529     PHINode *NewPhi =
4530         cast<PHINode>(State.get(State.Plan->getVPValue(OrigPhi), 0));
4531     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4532 
4533     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4534         predecessors(OrigPhi->getParent()));
4535     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4536         predecessors(NewPhi->getParent()));
4537     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4538            "Scalar and Vector BB should have the same number of predecessors");
4539 
4540     // The insertion point in Builder may be invalidated by the time we get
4541     // here. Force the Builder insertion point to something valid so that we do
4542     // not run into issues during insertion point restore in
4543     // getOrCreateVectorValue calls below.
4544     Builder.SetInsertPoint(NewPhi);
4545 
4546     // The predecessor order is preserved and we can rely on mapping between
4547     // scalar and vector block predecessors.
4548     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4549       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4550 
4551       // When looking up the new scalar/vector values to fix up, use incoming
4552       // values from original phi.
4553       Value *ScIncV =
4554           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4555 
4556       // Scalar incoming value may need a broadcast
4557       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4558       NewPhi->addIncoming(NewIncV, NewPredBB);
4559     }
4560   }
4561 }
4562 
4563 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4564                                    VPUser &Operands, unsigned UF,
4565                                    ElementCount VF, bool IsPtrLoopInvariant,
4566                                    SmallBitVector &IsIndexLoopInvariant,
4567                                    VPTransformState &State) {
4568   // Construct a vector GEP by widening the operands of the scalar GEP as
4569   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4570   // results in a vector of pointers when at least one operand of the GEP
4571   // is vector-typed. Thus, to keep the representation compact, we only use
4572   // vector-typed operands for loop-varying values.
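  // For example (illustrative), for a GEP computing &a[i] with a
  // loop-invariant base 'a' and a widened index 'i', only the index becomes a
  // vector operand and the result is a vector of VF pointers.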
4573 
4574   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4575     // If we are vectorizing, but the GEP has only loop-invariant operands,
4576     // the GEP we build (by only using vector-typed operands for
4577     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4578     // produce a vector of pointers, we need to either arbitrarily pick an
4579     // operand to broadcast, or broadcast a clone of the original GEP.
4580     // Here, we broadcast a clone of the original.
4581     //
4582     // TODO: If at some point we decide to scalarize instructions having
4583     //       loop-invariant operands, this special case will no longer be
4584     //       required. We would add the scalarization decision to
4585     //       collectLoopScalars() and teach getVectorValue() to broadcast
4586     //       the lane-zero scalar value.
4587     auto *Clone = Builder.Insert(GEP->clone());
4588     for (unsigned Part = 0; Part < UF; ++Part) {
4589       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4590       State.set(VPDef, GEP, EntryPart, Part);
4591       addMetadata(EntryPart, GEP);
4592     }
4593   } else {
4594     // If the GEP has at least one loop-varying operand, we are sure to
4595     // produce a vector of pointers. But if we are only unrolling, we want
4596     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4597     // produce with the code below will be scalar (if VF == 1) or vector
4598     // (otherwise). Note that for the unroll-only case, we still maintain
4599     // values in the vector mapping with initVector, as we do for other
4600     // instructions.
4601     for (unsigned Part = 0; Part < UF; ++Part) {
4602       // The pointer operand of the new GEP. If it's loop-invariant, we
4603       // won't broadcast it.
4604       auto *Ptr = IsPtrLoopInvariant
4605                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4606                       : State.get(Operands.getOperand(0), Part);
4607 
4608       // Collect all the indices for the new GEP. If any index is
4609       // loop-invariant, we won't broadcast it.
4610       SmallVector<Value *, 4> Indices;
4611       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4612         VPValue *Operand = Operands.getOperand(I);
4613         if (IsIndexLoopInvariant[I - 1])
4614           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4615         else
4616           Indices.push_back(State.get(Operand, Part));
4617       }
4618 
4619       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4620       // but it should be a vector, otherwise.
4621       auto *NewGEP =
4622           GEP->isInBounds()
4623               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4624                                           Indices)
4625               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4626       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4627              "NewGEP is not a pointer vector");
4628       State.set(VPDef, GEP, NewGEP, Part);
4629       addMetadata(NewGEP, GEP);
4630     }
4631   }
4632 }
4633 
4634 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4635                                               RecurrenceDescriptor *RdxDesc,
4636                                               Value *StartV, VPValue *Def,
4637                                               VPTransformState &State) {
4638   PHINode *P = cast<PHINode>(PN);
4639   if (EnableVPlanNativePath) {
4640     // Currently we enter here in the VPlan-native path for non-induction
4641     // PHIs where all control flow is uniform. We simply widen these PHIs.
4642     // Create a vector phi with no operands - the vector phi operands will be
4643     // set at the end of vector code generation.
4644     Type *VecTy = (State.VF.isScalar())
4645                       ? PN->getType()
4646                       : VectorType::get(PN->getType(), State.VF);
4647     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4648     State.set(Def, P, VecPhi, 0);
4649     OrigPHIsToFix.push_back(P);
4650 
4651     return;
4652   }
4653 
4654   assert(PN->getParent() == OrigLoop->getHeader() &&
4655          "Non-header phis should have been handled elsewhere");
4656 
4657   // In order to support recurrences we need to be able to vectorize Phi nodes.
4658   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4659   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4660   // this value when we vectorize all of the instructions that use the PHI.
4661   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4662     Value *Iden = nullptr;
4663     bool ScalarPHI =
4664         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4665     Type *VecTy =
4666         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4667 
4668     if (RdxDesc) {
4669       assert(Legal->isReductionVariable(P) && StartV &&
4670              "RdxDesc should only be set for reduction variables; in that case "
4671              "a StartV is also required");
4672       RecurKind RK = RdxDesc->getRecurrenceKind();
4673       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4675         if (ScalarPHI) {
4676           Iden = StartV;
4677         } else {
4678           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4679           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4680           StartV = Iden =
4681               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4682         }
4683       } else {
4684         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4685             RK, VecTy->getScalarType());
4686         Iden = IdenC;
4687 
4688         if (!ScalarPHI) {
4689           Iden = ConstantVector::getSplat(State.VF, IdenC);
4690           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4691           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4692           Constant *Zero = Builder.getInt32(0);
4693           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4694         }
4695       }
4696     }
4697 
4698     for (unsigned Part = 0; Part < State.UF; ++Part) {
4699       // This is phase one of vectorizing PHIs.
4700       Value *EntryPart = PHINode::Create(
4701           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4702       State.set(Def, P, EntryPart, Part);
4703       if (StartV) {
4704         // Make sure to add the reduction start value only to the
4705         // first unroll part.
4706         Value *StartVal = (Part == 0) ? StartV : Iden;
4707         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4708       }
4709     }
4710     return;
4711   }
4712 
4713   assert(!Legal->isReductionVariable(P) &&
4714          "reductions should be handled above");
4715 
4716   setDebugLocFromInst(Builder, P);
4717 
4718   // This PHINode must be an induction variable.
4719   // Make sure that we know about it.
4720   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4721 
4722   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4723   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4724 
4725   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4726   // which can be found from the original scalar operations.
4727   switch (II.getKind()) {
4728   case InductionDescriptor::IK_NoInduction:
4729     llvm_unreachable("Unknown induction");
4730   case InductionDescriptor::IK_IntInduction:
4731   case InductionDescriptor::IK_FpInduction:
4732     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4733   case InductionDescriptor::IK_PtrInduction: {
4734     // Handle the pointer induction variable case.
4735     assert(P->getType()->isPointerTy() && "Unexpected type.");
4736 
4737     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4738       // This is the normalized GEP that starts counting at zero.
4739       Value *PtrInd =
4740           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4741       // Determine the number of scalars we need to generate for each unroll
4742       // iteration. If the instruction is uniform, we only need to generate the
4743       // first lane. Otherwise, we generate all VF values.
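      // For example (illustrative), with VF=4, UF=2 and a non-uniform pointer,
      // lanes 0..3 of parts 0 and 1 use the offsets PtrInd+0 .. PtrInd+7, and
      // each offset is turned into a scalar "next.gep" address below.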
4744       unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF)
4745                            ? 1
4746                            : State.VF.getKnownMinValue();
4747       for (unsigned Part = 0; Part < UF; ++Part) {
4748         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4749           Constant *Idx = ConstantInt::get(
4750               PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue());
4751           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4752           Value *SclrGep =
4753               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4754           SclrGep->setName("next.gep");
4755           State.set(Def, P, SclrGep, VPIteration(Part, Lane));
4756         }
4757       }
4758       return;
4759     }
4760     assert(isa<SCEVConstant>(II.getStep()) &&
4761            "Induction step not a SCEV constant!");
4762     Type *PhiType = II.getStep()->getType();
4763 
4764     // Build a pointer phi
4765     Value *ScalarStartValue = II.getStartValue();
4766     Type *ScStValueType = ScalarStartValue->getType();
4767     PHINode *NewPointerPhi =
4768         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4769     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4770 
4771     // A pointer induction, performed by using a gep
4772     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4773     Instruction *InductionLoc = LoopLatch->getTerminator();
4774     const SCEV *ScalarStep = II.getStep();
4775     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4776     Value *ScalarStepValue =
4777         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4778     Value *InductionGEP = GetElementPtrInst::Create(
4779         ScStValueType->getPointerElementType(), NewPointerPhi,
4780         Builder.CreateMul(
4781             ScalarStepValue,
4782             ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)),
4783         "ptr.ind", InductionLoc);
4784     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4785 
4786     // Create UF many actual address geps that use the pointer
4787     // phi as base and a vectorized version of the step value
4788     // (<step*0, ..., step*N>) as offset.
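    // For example (illustrative; element type i32, VF=4, UF=1, step 1), the
    // generated IR is roughly:
    //   %pointer.phi = phi i32* [ %start, %vector.ph ], [ %ptr.ind, %vector.body ]
    //   %gep = getelementptr i32, i32* %pointer.phi, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
    //   ...
    //   %ptr.ind = getelementptr i32, i32* %pointer.phi, i64 4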
4789     for (unsigned Part = 0; Part < State.UF; ++Part) {
4790       SmallVector<Constant *, 8> Indices;
4791       // Create a vector of consecutive numbers from zero to VF.
4792       for (unsigned i = 0; i < State.VF.getKnownMinValue(); ++i)
4793         Indices.push_back(
4794             ConstantInt::get(PhiType, i + Part * State.VF.getKnownMinValue()));
4795       Constant *StartOffset = ConstantVector::get(Indices);
4796 
4797       Value *GEP = Builder.CreateGEP(
4798           ScStValueType->getPointerElementType(), NewPointerPhi,
4799           Builder.CreateMul(StartOffset,
4800                             Builder.CreateVectorSplat(
4801                                 State.VF.getKnownMinValue(), ScalarStepValue),
4802                             "vector.gep"));
4803       State.set(Def, P, GEP, Part);
4804     }
4805   }
4806   }
4807 }
4808 
4809 /// A helper function for checking whether an integer division-related
4810 /// instruction may divide by zero (in which case it must be predicated if
4811 /// executed conditionally in the scalar code).
4812 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4814 /// converted into multiplication, so we will still end up scalarizing
4815 /// the division, but can do so w/o predication.
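/// For example (illustrative), 'udiv i32 %x, %n' with a loop-varying %n may
/// divide by zero and needs predication when executed conditionally, whereas
/// 'udiv i32 %x, 7' has a known non-zero divisor and does not.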
4816 static bool mayDivideByZero(Instruction &I) {
4817   assert((I.getOpcode() == Instruction::UDiv ||
4818           I.getOpcode() == Instruction::SDiv ||
4819           I.getOpcode() == Instruction::URem ||
4820           I.getOpcode() == Instruction::SRem) &&
4821          "Unexpected instruction");
4822   Value *Divisor = I.getOperand(1);
4823   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4824   return !CInt || CInt->isZero();
4825 }
4826 
4827 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4828                                            VPUser &User,
4829                                            VPTransformState &State) {
4830   switch (I.getOpcode()) {
4831   case Instruction::Call:
4832   case Instruction::Br:
4833   case Instruction::PHI:
4834   case Instruction::GetElementPtr:
4835   case Instruction::Select:
4836     llvm_unreachable("This instruction is handled by a different recipe.");
4837   case Instruction::UDiv:
4838   case Instruction::SDiv:
4839   case Instruction::SRem:
4840   case Instruction::URem:
4841   case Instruction::Add:
4842   case Instruction::FAdd:
4843   case Instruction::Sub:
4844   case Instruction::FSub:
4845   case Instruction::FNeg:
4846   case Instruction::Mul:
4847   case Instruction::FMul:
4848   case Instruction::FDiv:
4849   case Instruction::FRem:
4850   case Instruction::Shl:
4851   case Instruction::LShr:
4852   case Instruction::AShr:
4853   case Instruction::And:
4854   case Instruction::Or:
4855   case Instruction::Xor: {
4856     // Just widen unops and binops.
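    // For example (illustrative, VF=4), a scalar 'add i32 %a, %b' becomes a
    // single 'add <4 x i32> %a.vec, %b.vec' per unroll part.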
4857     setDebugLocFromInst(Builder, &I);
4858 
4859     for (unsigned Part = 0; Part < UF; ++Part) {
4860       SmallVector<Value *, 2> Ops;
4861       for (VPValue *VPOp : User.operands())
4862         Ops.push_back(State.get(VPOp, Part));
4863 
4864       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4865 
4866       if (auto *VecOp = dyn_cast<Instruction>(V))
4867         VecOp->copyIRFlags(&I);
4868 
4869       // Use this vector value for all users of the original instruction.
4870       State.set(Def, &I, V, Part);
4871       addMetadata(V, &I);
4872     }
4873 
4874     break;
4875   }
4876   case Instruction::ICmp:
4877   case Instruction::FCmp: {
4878     // Widen compares. Generate vector compares.
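    // For example (illustrative, VF=4), a scalar 'icmp slt i32 %a, %b' becomes
    // 'icmp slt <4 x i32> %a.vec, %b.vec', yielding a <4 x i1> mask per part.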
4879     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4880     auto *Cmp = cast<CmpInst>(&I);
4881     setDebugLocFromInst(Builder, Cmp);
4882     for (unsigned Part = 0; Part < UF; ++Part) {
4883       Value *A = State.get(User.getOperand(0), Part);
4884       Value *B = State.get(User.getOperand(1), Part);
4885       Value *C = nullptr;
4886       if (FCmp) {
4887         // Propagate fast math flags.
4888         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4889         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4890         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4891       } else {
4892         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4893       }
4894       State.set(Def, &I, C, Part);
4895       addMetadata(C, &I);
4896     }
4897 
4898     break;
4899   }
4900 
4901   case Instruction::ZExt:
4902   case Instruction::SExt:
4903   case Instruction::FPToUI:
4904   case Instruction::FPToSI:
4905   case Instruction::FPExt:
4906   case Instruction::PtrToInt:
4907   case Instruction::IntToPtr:
4908   case Instruction::SIToFP:
4909   case Instruction::UIToFP:
4910   case Instruction::Trunc:
4911   case Instruction::FPTrunc:
4912   case Instruction::BitCast: {
4913     auto *CI = cast<CastInst>(&I);
4914     setDebugLocFromInst(Builder, CI);
4915 
    // Vectorize casts.
4917     Type *DestTy =
4918         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
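    // For example (illustrative, VF=4), 'trunc i32 %x to i16' becomes
    // 'trunc <4 x i32> %x.vec to <4 x i16>'.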
4919 
4920     for (unsigned Part = 0; Part < UF; ++Part) {
4921       Value *A = State.get(User.getOperand(0), Part);
4922       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4923       State.set(Def, &I, Cast, Part);
4924       addMetadata(Cast, &I);
4925     }
4926     break;
4927   }
4928   default:
4929     // This instruction is not vectorized by simple widening.
4930     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4931     llvm_unreachable("Unhandled instruction!");
4932   } // end of switch.
4933 }
4934 
4935 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4936                                                VPUser &ArgOperands,
4937                                                VPTransformState &State) {
4938   assert(!isa<DbgInfoIntrinsic>(I) &&
4939          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4940   setDebugLocFromInst(Builder, &I);
4941 
4942   Module *M = I.getParent()->getParent()->getParent();
4943   auto *CI = cast<CallInst>(&I);
4944 
4945   SmallVector<Type *, 4> Tys;
4946   for (Value *ArgOperand : CI->arg_operands())
4947     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4948 
4949   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4950 
  // This flag indicates whether to use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether an intrinsic call is
  // more beneficial than a library call.
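  // For example (illustrative), a call to llvm.sqrt.f32 may be widened to
  // llvm.sqrt.v4f32 when the intrinsic cost is no worse than that of a
  // vectorized library call.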
4954   bool NeedToScalarize = false;
4955   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4956   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4957   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4958   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4959          "Instruction should be scalarized elsewhere.");
4960   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4961          "Cannot have invalid costs while widening");
4962 
4963   for (unsigned Part = 0; Part < UF; ++Part) {
4964     SmallVector<Value *, 4> Args;
4965     for (auto &I : enumerate(ArgOperands.operands())) {
4966       // Some intrinsics have a scalar argument - don't replace it with a
4967       // vector.
4968       Value *Arg;
4969       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4970         Arg = State.get(I.value(), Part);
4971       else
4972         Arg = State.get(I.value(), VPIteration(0, 0));
4973       Args.push_back(Arg);
4974     }
4975 
4976     Function *VectorF;
4977     if (UseVectorIntrinsic) {
4978       // Use vector version of the intrinsic.
4979       Type *TysForDecl[] = {CI->getType()};
4980       if (VF.isVector())
4981         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4982       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4983       assert(VectorF && "Can't retrieve vector intrinsic.");
4984     } else {
4985       // Use vector version of the function call.
4986       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4987 #ifndef NDEBUG
4988       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4989              "Can't create vector function.");
4990 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, &I, V, Part);
    addMetadata(V, &I);
5002   }
5003 }
5004 
5005 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5006                                                  VPUser &Operands,
5007                                                  bool InvariantCond,
5008                                                  VPTransformState &State) {
5009   setDebugLocFromInst(Builder, &I);
5010 
  // The condition can be loop invariant but still defined inside the
5012   // loop. This means that we can't just use the original 'cond' value.
5013   // We have to take the 'vectorized' value and pick the first lane.
5014   // Instcombine will make this a no-op.
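  // For example (illustrative), lane 0 is read back with something like
  // 'extractelement <4 x i1> %cond.vec, i32 0' when the condition has already
  // been widened.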
5015   auto *InvarCond = InvariantCond
5016                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5017                         : nullptr;
5018 
5019   for (unsigned Part = 0; Part < UF; ++Part) {
5020     Value *Cond =
5021         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5022     Value *Op0 = State.get(Operands.getOperand(1), Part);
5023     Value *Op1 = State.get(Operands.getOperand(2), Part);
5024     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5025     State.set(VPDef, &I, Sel, Part);
5026     addMetadata(Sel, &I);
5027   }
5028 }
5029 
5030 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5031   // We should not collect Scalars more than once per VF. Right now, this
5032   // function is called from collectUniformsAndScalars(), which already does
5033   // this check. Collecting Scalars for VF=1 does not make any sense.
5034   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5035          "This function should not be visited twice for the same VF");
5036 
5037   SmallSetVector<Instruction *, 8> Worklist;
5038 
5039   // These sets are used to seed the analysis with pointers used by memory
5040   // accesses that will remain scalar.
5041   SmallSetVector<Instruction *, 8> ScalarPtrs;
5042   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5043   auto *Latch = TheLoop->getLoopLatch();
5044 
5045   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5046   // The pointer operands of loads and stores will be scalar as long as the
5047   // memory access is not a gather or scatter operation. The value operand of a
5048   // store will remain scalar if the store is scalarized.
5049   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5050     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5051     assert(WideningDecision != CM_Unknown &&
5052            "Widening decision should be ready at this moment");
5053     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5054       if (Ptr == Store->getValueOperand())
5055         return WideningDecision == CM_Scalarize;
5056     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
5058     return WideningDecision != CM_GatherScatter;
5059   };
5060 
5061   // A helper that returns true if the given value is a bitcast or
5062   // getelementptr instruction contained in the loop.
5063   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5064     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5065             isa<GetElementPtrInst>(V)) &&
5066            !TheLoop->isLoopInvariant(V);
5067   };
5068 
5069   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5070     if (!isa<PHINode>(Ptr) ||
5071         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5072       return false;
5073     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5074     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5075       return false;
5076     return isScalarUse(MemAccess, Ptr);
5077   };
5078 
  // A helper that evaluates a memory access's use of a pointer. If the pointer
  // is the pointer induction of a loop, it is inserted into the Worklist. If
  // the use will be a scalar use, and the pointer is only used by memory
  // accesses, we place the pointer in ScalarPtrs. Otherwise, the pointer is
  // placed in PossibleNonScalarPtrs.
5084   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5085     if (isScalarPtrInduction(MemAccess, Ptr)) {
5086       Worklist.insert(cast<Instruction>(Ptr));
5087       Instruction *Update = cast<Instruction>(
5088           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5089       Worklist.insert(Update);
5090       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5091                         << "\n");
5092       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5093                         << "\n");
5094       return;
5095     }
5096     // We only care about bitcast and getelementptr instructions contained in
5097     // the loop.
5098     if (!isLoopVaryingBitCastOrGEP(Ptr))
5099       return;
5100 
5101     // If the pointer has already been identified as scalar (e.g., if it was
5102     // also identified as uniform), there's nothing to do.
5103     auto *I = cast<Instruction>(Ptr);
5104     if (Worklist.count(I))
5105       return;
5106 
5107     // If the use of the pointer will be a scalar use, and all users of the
5108     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5109     // place the pointer in PossibleNonScalarPtrs.
5110     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5111           return isa<LoadInst>(U) || isa<StoreInst>(U);
5112         }))
5113       ScalarPtrs.insert(I);
5114     else
5115       PossibleNonScalarPtrs.insert(I);
5116   };
5117 
  // We seed the scalars analysis with two classes of instructions: (1)
5119   // instructions marked uniform-after-vectorization and (2) bitcast,
5120   // getelementptr and (pointer) phi instructions used by memory accesses
5121   // requiring a scalar use.
5122   //
5123   // (1) Add to the worklist all instructions that have been identified as
5124   // uniform-after-vectorization.
5125   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5126 
5127   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5128   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5130   // scatter operation. The value operand of a store will remain scalar if the
5131   // store is scalarized.
5132   for (auto *BB : TheLoop->blocks())
5133     for (auto &I : *BB) {
5134       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5135         evaluatePtrUse(Load, Load->getPointerOperand());
5136       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5137         evaluatePtrUse(Store, Store->getPointerOperand());
5138         evaluatePtrUse(Store, Store->getValueOperand());
5139       }
5140     }
5141   for (auto *I : ScalarPtrs)
5142     if (!PossibleNonScalarPtrs.count(I)) {
5143       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5144       Worklist.insert(I);
5145     }
5146 
5147   // Insert the forced scalars.
5148   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5149   // induction variable when the PHI user is scalarized.
5150   auto ForcedScalar = ForcedScalars.find(VF);
5151   if (ForcedScalar != ForcedScalars.end())
5152     for (auto *I : ForcedScalar->second)
5153       Worklist.insert(I);
5154 
5155   // Expand the worklist by looking through any bitcasts and getelementptr
5156   // instructions we've already identified as scalar. This is similar to the
5157   // expansion step in collectLoopUniforms(); however, here we're only
5158   // expanding to include additional bitcasts and getelementptr instructions.
5159   unsigned Idx = 0;
5160   while (Idx != Worklist.size()) {
5161     Instruction *Dst = Worklist[Idx++];
5162     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5163       continue;
5164     auto *Src = cast<Instruction>(Dst->getOperand(0));
5165     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5166           auto *J = cast<Instruction>(U);
5167           return !TheLoop->contains(J) || Worklist.count(J) ||
5168                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5169                   isScalarUse(J, Src));
5170         })) {
5171       Worklist.insert(Src);
5172       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5173     }
5174   }
5175 
5176   // An induction variable will remain scalar if all users of the induction
5177   // variable and induction variable update remain scalar.
5178   for (auto &Induction : Legal->getInductionVars()) {
5179     auto *Ind = Induction.first;
5180     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5181 
5182     // If tail-folding is applied, the primary induction variable will be used
5183     // to feed a vector compare.
5184     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5185       continue;
5186 
5187     // Determine if all users of the induction variable are scalar after
5188     // vectorization.
5189     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5190       auto *I = cast<Instruction>(U);
5191       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5192     });
5193     if (!ScalarInd)
5194       continue;
5195 
5196     // Determine if all users of the induction variable update instruction are
5197     // scalar after vectorization.
5198     auto ScalarIndUpdate =
5199         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5200           auto *I = cast<Instruction>(U);
5201           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5202         });
5203     if (!ScalarIndUpdate)
5204       continue;
5205 
5206     // The induction variable and its update instruction will remain scalar.
5207     Worklist.insert(Ind);
5208     Worklist.insert(IndUpdate);
5209     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5210     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5211                       << "\n");
5212   }
5213 
5214   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5215 }
5216 
5217 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5218                                                          ElementCount VF) {
5219   if (!blockNeedsPredication(I->getParent()))
5220     return false;
5221   switch(I->getOpcode()) {
5222   default:
5223     break;
5224   case Instruction::Load:
5225   case Instruction::Store: {
5226     if (!Legal->isMaskRequired(I))
5227       return false;
5228     auto *Ptr = getLoadStorePointerOperand(I);
5229     auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction; get that
    // result.
5232     if (VF.isVector()) {
5233       InstWidening WideningDecision = getWideningDecision(I, VF);
5234       assert(WideningDecision != CM_Unknown &&
5235              "Widening decision should be ready at this moment");
5236       return WideningDecision == CM_Scalarize;
5237     }
5238     const Align Alignment = getLoadStoreAlignment(I);
5239     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5240                                 isLegalMaskedGather(Ty, Alignment))
5241                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5242                                 isLegalMaskedScatter(Ty, Alignment));
5243   }
5244   case Instruction::UDiv:
5245   case Instruction::SDiv:
5246   case Instruction::SRem:
5247   case Instruction::URem:
5248     return mayDivideByZero(*I);
5249   }
5250   return false;
5251 }
5252 
5253 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5254     Instruction *I, ElementCount VF) {
5255   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5256   assert(getWideningDecision(I, VF) == CM_Unknown &&
5257          "Decision should not be set yet.");
5258   auto *Group = getInterleavedAccessGroup(I);
5259   assert(Group && "Must have a group.");
5260 
  // If the instruction's allocated size doesn't equal its type size, it
5262   // requires padding and will be scalarized.
5263   auto &DL = I->getModule()->getDataLayout();
5264   auto *ScalarTy = getMemInstValueType(I);
5265   if (hasIrregularType(ScalarTy, DL, VF))
5266     return false;
5267 
5268   // Check if masking is required.
5269   // A Group may need masking for one of two reasons: it resides in a block that
5270   // needs predication, or it was decided to use masking to deal with gaps.
5271   bool PredicatedAccessRequiresMasking =
5272       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5273   bool AccessWithGapsRequiresMasking =
5274       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5275   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5276     return true;
5277 
  // If masked interleaving is required, we expect that the user/target has
5279   // enabled it, because otherwise it either wouldn't have been created or
5280   // it should have been invalidated by the CostModel.
5281   assert(useMaskedInterleavedAccesses(TTI) &&
5282          "Masked interleave-groups for predicated accesses are not enabled.");
5283 
5284   auto *Ty = getMemInstValueType(I);
5285   const Align Alignment = getLoadStoreAlignment(I);
5286   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5287                           : TTI.isLegalMaskedStore(Ty, Alignment);
5288 }
5289 
5290 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5291     Instruction *I, ElementCount VF) {
5292   // Get and ensure we have a valid memory instruction.
5293   LoadInst *LI = dyn_cast<LoadInst>(I);
5294   StoreInst *SI = dyn_cast<StoreInst>(I);
5295   assert((LI || SI) && "Invalid memory instruction");
5296 
5297   auto *Ptr = getLoadStorePointerOperand(I);
5298 
  // First of all, in order to be widened the pointer must be consecutive.
5300   if (!Legal->isConsecutivePtr(Ptr))
5301     return false;
5302 
5303   // If the instruction is a store located in a predicated block, it will be
5304   // scalarized.
5305   if (isScalarWithPredication(I))
5306     return false;
5307 
  // If the instruction's allocated size doesn't equal its type size, it
5309   // requires padding and will be scalarized.
5310   auto &DL = I->getModule()->getDataLayout();
5311   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5312   if (hasIrregularType(ScalarTy, DL, VF))
5313     return false;
5314 
5315   return true;
5316 }
5317 
5318 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5319   // We should not collect Uniforms more than once per VF. Right now,
5320   // this function is called from collectUniformsAndScalars(), which
5321   // already does this check. Collecting Uniforms for VF=1 does not make any
5322   // sense.
5323 
5324   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5325          "This function should not be visited twice for the same VF");
5326 
  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again; Uniforms.count(VF) will still return 1.
5329   Uniforms[VF].clear();
5330 
5331   // We now know that the loop is vectorizable!
5332   // Collect instructions inside the loop that will remain uniform after
5333   // vectorization.
5334 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5337   auto isOutOfScope = [&](Value *V) -> bool {
5338     Instruction *I = dyn_cast<Instruction>(V);
5339     return (!I || !TheLoop->contains(I));
5340   };
5341 
5342   SetVector<Instruction *> Worklist;
5343   BasicBlock *Latch = TheLoop->getLoopLatch();
5344 
5345   // Instructions that are scalar with predication must not be considered
5346   // uniform after vectorization, because that would create an erroneous
5347   // replicating region where only a single instance out of VF should be formed.
  // TODO: optimize such rare cases if found to be important, see PR40816.
5349   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5350     if (isOutOfScope(I)) {
5351       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5352                         << *I << "\n");
5353       return;
5354     }
5355     if (isScalarWithPredication(I, VF)) {
5356       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5357                         << *I << "\n");
5358       return;
5359     }
5360     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5361     Worklist.insert(I);
5362   };
5363 
5364   // Start with the conditional branch. If the branch condition is an
5365   // instruction contained in the loop that is only used by the branch, it is
5366   // uniform.
5367   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5368   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5369     addToWorklistIfAllowed(Cmp);
5370 
5371   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5372     InstWidening WideningDecision = getWideningDecision(I, VF);
5373     assert(WideningDecision != CM_Unknown &&
5374            "Widening decision should be ready at this moment");
5375 
5376     // A uniform memory op is itself uniform.  We exclude uniform stores
5377     // here as they demand the last lane, not the first one.
5378     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5379       assert(WideningDecision == CM_Scalarize);
5380       return true;
5381     }
5382 
5383     return (WideningDecision == CM_Widen ||
5384             WideningDecision == CM_Widen_Reverse ||
5385             WideningDecision == CM_Interleave);
5386   };
5387 
5388 
5389   // Returns true if Ptr is the pointer operand of a memory access instruction
5390   // I, and I is known to not require scalarization.
5391   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5392     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5393   };
5394 
5395   // Holds a list of values which are known to have at least one uniform use.
5396   // Note that there may be other uses which aren't uniform.  A "uniform use"
5397   // here is something which only demands lane 0 of the unrolled iterations;
5398   // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
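  // For example (illustrative), the pointer operand of a consecutive widened
  // load only demands lane 0: the wide load is built from the lane-0 address,
  // even though the GEP feeding it could otherwise produce a value per lane.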
5400   SmallPtrSet<Value *, 8> HasUniformUse;
5401 
5402   // Scan the loop for instructions which are either a) known to have only
5403   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5404   for (auto *BB : TheLoop->blocks())
5405     for (auto &I : *BB) {
5406       // If there's no pointer operand, there's nothing to do.
5407       auto *Ptr = getLoadStorePointerOperand(&I);
5408       if (!Ptr)
5409         continue;
5410 
5411       // A uniform memory op is itself uniform.  We exclude uniform stores
5412       // here as they demand the last lane, not the first one.
5413       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5414         addToWorklistIfAllowed(&I);
5415 
5416       if (isUniformDecision(&I, VF)) {
5417         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5418         HasUniformUse.insert(Ptr);
5419       }
5420     }
5421 
5422   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5423   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5424   // disallows uses outside the loop as well.
5425   for (auto *V : HasUniformUse) {
5426     if (isOutOfScope(V))
5427       continue;
5428     auto *I = cast<Instruction>(V);
5429     auto UsersAreMemAccesses =
5430       llvm::all_of(I->users(), [&](User *U) -> bool {
5431         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5432       });
5433     if (UsersAreMemAccesses)
5434       addToWorklistIfAllowed(I);
5435   }
5436 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5440   unsigned idx = 0;
5441   while (idx != Worklist.size()) {
5442     Instruction *I = Worklist[idx++];
5443 
5444     for (auto OV : I->operand_values()) {
5445       // isOutOfScope operands cannot be uniform instructions.
5446       if (isOutOfScope(OV))
5447         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5450       auto *OP = dyn_cast<PHINode>(OV);
5451       if (OP && Legal->isFirstOrderRecurrence(OP))
5452         continue;
5453       // If all the users of the operand are uniform, then add the
5454       // operand into the uniform worklist.
5455       auto *OI = cast<Instruction>(OV);
5456       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5457             auto *J = cast<Instruction>(U);
5458             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5459           }))
5460         addToWorklistIfAllowed(OI);
5461     }
5462   }
5463 
5464   // For an instruction to be added into Worklist above, all its users inside
5465   // the loop should also be in Worklist. However, this condition cannot be
5466   // true for phi nodes that form a cyclic dependence. We must process phi
5467   // nodes separately. An induction variable will remain uniform if all users
5468   // of the induction variable and induction variable update remain uniform.
5469   // The code below handles both pointer and non-pointer induction variables.
5470   for (auto &Induction : Legal->getInductionVars()) {
5471     auto *Ind = Induction.first;
5472     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5473 
5474     // Determine if all users of the induction variable are uniform after
5475     // vectorization.
5476     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5477       auto *I = cast<Instruction>(U);
5478       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5479              isVectorizedMemAccessUse(I, Ind);
5480     });
5481     if (!UniformInd)
5482       continue;
5483 
5484     // Determine if all users of the induction variable update instruction are
5485     // uniform after vectorization.
5486     auto UniformIndUpdate =
5487         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5488           auto *I = cast<Instruction>(U);
5489           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5490                  isVectorizedMemAccessUse(I, IndUpdate);
5491         });
5492     if (!UniformIndUpdate)
5493       continue;
5494 
5495     // The induction variable and its update instruction will remain uniform.
5496     addToWorklistIfAllowed(Ind);
5497     addToWorklistIfAllowed(IndUpdate);
5498   }
5499 
5500   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5501 }
5502 
5503 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5504   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5505 
5506   if (Legal->getRuntimePointerChecking()->Need) {
5507     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5508         "runtime pointer checks needed. Enable vectorization of this "
5509         "loop with '#pragma clang loop vectorize(enable)' when "
5510         "compiling with -Os/-Oz",
5511         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5512     return true;
5513   }
5514 
5515   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5516     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5517         "runtime SCEV checks needed. Enable vectorization of this "
5518         "loop with '#pragma clang loop vectorize(enable)' when "
5519         "compiling with -Os/-Oz",
5520         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5521     return true;
5522   }
5523 
5524   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5525   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5526     reportVectorizationFailure("Runtime stride check for small trip count",
5527         "runtime stride == 1 checks needed. Enable vectorization of "
5528         "this loop without such check by compiling with -Os/-Oz",
5529         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5530     return true;
5531   }
5532 
5533   return false;
5534 }
5535 
5536 Optional<ElementCount>
5537 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5538   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the branch is still likely to
    // be dynamically uniform if the target can skip it.
5541     reportVectorizationFailure(
5542         "Not inserting runtime ptr check for divergent target",
5543         "runtime pointer checks needed. Not enabled for divergent target",
5544         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5545     return None;
5546   }
5547 
5548   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5549   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5550   if (TC == 1) {
5551     reportVectorizationFailure("Single iteration (non) loop",
5552         "loop trip count is one, irrelevant for vectorization",
5553         "SingleIterationLoop", ORE, TheLoop);
5554     return None;
5555   }
5556 
5557   switch (ScalarEpilogueStatus) {
5558   case CM_ScalarEpilogueAllowed:
5559     return computeFeasibleMaxVF(TC, UserVF);
5560   case CM_ScalarEpilogueNotAllowedUsePredicate:
5561     LLVM_FALLTHROUGH;
5562   case CM_ScalarEpilogueNotNeededUsePredicate:
5563     LLVM_DEBUG(
5564         dbgs() << "LV: vector predicate hint/switch found.\n"
5565                << "LV: Not allowing scalar epilogue, creating predicated "
5566                << "vector loop.\n");
5567     break;
5568   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5569     // fallthrough as a special case of OptForSize
5570   case CM_ScalarEpilogueNotAllowedOptSize:
5571     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5572       LLVM_DEBUG(
5573           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5574     else
5575       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5576                         << "count.\n");
5577 
5578     // Bail if runtime checks are required, which are not good when optimising
5579     // for size.
5580     if (runtimeChecksRequired())
5581       return None;
5582 
5583     break;
5584   }
5585 
  // The only loops we can vectorize without a scalar epilogue are loops with
5587   // a bottom-test and a single exiting block. We'd have to handle the fact
5588   // that not every instruction executes on the last iteration.  This will
5589   // require a lane mask which varies through the vector loop body.  (TODO)
5590   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5591     // If there was a tail-folding hint/switch, but we can't fold the tail by
5592     // masking, fallback to a vectorization with a scalar epilogue.
5593     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5594       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5595                            "scalar epilogue instead.\n");
5596       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5597       return computeFeasibleMaxVF(TC, UserVF);
5598     }
5599     return None;
5600   }
5601 
  // Now try tail folding.
5603 
5604   // Invalidate interleave groups that require an epilogue if we can't mask
5605   // the interleave-group.
5606   if (!useMaskedInterleavedAccesses(TTI)) {
5607     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5608            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5611     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5612   }
5613 
5614   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5615   assert(!MaxVF.isScalable() &&
5616          "Scalable vectors do not yet support tail folding");
5617   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5618          "MaxVF must be a power of 2");
5619   unsigned MaxVFtimesIC =
5620       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
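  // For example (illustrative), a known trip count of 64 with MaxVF=8 and no
  // user interleave count gives (BTC + 1) % 8 == 0, so no tail remains and
  // MaxVF is returned below.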
5623   ScalarEvolution *SE = PSE.getSE();
5624   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5625   const SCEV *ExitCount = SE->getAddExpr(
5626       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5627   const SCEV *Rem = SE->getURemExpr(
5628       SE->applyLoopGuards(ExitCount, TheLoop),
5629       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5630   if (Rem->isZero()) {
5631     // Accept MaxVF if we do not have a tail.
5632     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5633     return MaxVF;
5634   }
5635 
5636   // If we don't know the precise trip count, or if the trip count that we
5637   // found modulo the vectorization factor is not zero, try to fold the tail
5638   // by masking.
5639   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5640   if (Legal->prepareToFoldTailByMasking()) {
5641     FoldTailByMasking = true;
5642     return MaxVF;
5643   }
5644 
5645   // If there was a tail-folding hint/switch, but we can't fold the tail by
5646   // masking, fallback to a vectorization with a scalar epilogue.
5647   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5648     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5649                          "scalar epilogue instead.\n");
5650     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5651     return MaxVF;
5652   }
5653 
5654   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5655     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5656     return None;
5657   }
5658 
5659   if (TC == 0) {
5660     reportVectorizationFailure(
5661         "Unable to calculate the loop count due to complex control flow",
5662         "unable to calculate the loop count due to complex control flow",
5663         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5664     return None;
5665   }
5666 
5667   reportVectorizationFailure(
5668       "Cannot optimize for size and vectorize at the same time.",
5669       "cannot optimize for size and vectorize at the same time. "
5670       "Enable vectorization of this loop with '#pragma clang loop "
5671       "vectorize(enable)' when compiling with -Os/-Oz",
5672       "NoTailLoopWithOptForSize", ORE, TheLoop);
5673   return None;
5674 }
5675 
5676 ElementCount
5677 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5678                                                  ElementCount UserVF) {
5679   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5680                               !TTI.supportsScalableVectors() &&
5681                               !ForceTargetSupportsScalableVectors;
5682   if (IgnoreScalableUserVF) {
5683     LLVM_DEBUG(
5684         dbgs() << "LV: Ignoring VF=" << UserVF
5685                << " because target does not support scalable vectors.\n");
5686     ORE->emit([&]() {
5687       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5688                                         TheLoop->getStartLoc(),
5689                                         TheLoop->getHeader())
5690              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5691              << " because target does not support scalable vectors.";
5692     });
5693   }
5694 
5695   // Beyond this point two scenarios are handled. If UserVF isn't specified
5696   // then a suitable VF is chosen. If UserVF is specified and there are
5697   // dependencies, check if it's legal. However, if a UserVF is specified and
5698   // there are no dependencies, then there's nothing to do.
5699   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5700     if (!canVectorizeReductions(UserVF)) {
5701       reportVectorizationFailure(
5702           "LV: Scalable vectorization not supported for the reduction "
5703           "operations found in this loop. Using fixed-width "
5704           "vectorization instead.",
5705           "Scalable vectorization not supported for the reduction operations "
5706           "found in this loop. Using fixed-width vectorization instead.",
5707           "ScalableVFUnfeasible", ORE, TheLoop);
5708       return computeFeasibleMaxVF(
5709           ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5710     }
5711 
5712     if (Legal->isSafeForAnyVectorWidth())
5713       return UserVF;
5714   }
5715 
5716   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5717   unsigned SmallestType, WidestType;
5718   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5719   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5720 
5721   // Get the maximum safe dependence distance in bits computed by LAA.
5722   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5724   // dependence distance).
5725   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
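  // For example (illustrative), a maximum safe dependence distance of 256 bits
  // with a widest type of 32 bits allows at most 256 / 32 = 8 lanes.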
5726 
5727   // If the user vectorization factor is legally unsafe, clamp it to a safe
5728   // value. Otherwise, return as is.
5729   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5730     unsigned MaxSafeElements =
5731         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5732     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5733 
5734     if (UserVF.isScalable()) {
5735       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5736 
5737       // Scale VF by vscale before checking if it's safe.
5738       MaxSafeVF = ElementCount::getScalable(
5739           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5740 
5741       if (MaxSafeVF.isZero()) {
5742         // The dependence distance is too small to use scalable vectors,
5743         // fallback on fixed.
5744         LLVM_DEBUG(
5745             dbgs()
5746             << "LV: Max legal vector width too small, scalable vectorization "
5747                "unfeasible. Using fixed-width vectorization instead.\n");
5748         ORE->emit([&]() {
5749           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5750                                             TheLoop->getStartLoc(),
5751                                             TheLoop->getHeader())
5752                  << "Max legal vector width too small, scalable vectorization "
5753                  << "unfeasible. Using fixed-width vectorization instead.";
5754         });
5755         return computeFeasibleMaxVF(
5756             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5757       }
5758     }
5759 
5760     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5761 
5762     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5763       return UserVF;
5764 
5765     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5766                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5767                       << ".\n");
5768     ORE->emit([&]() {
5769       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5770                                         TheLoop->getStartLoc(),
5771                                         TheLoop->getHeader())
5772              << "User-specified vectorization factor "
5773              << ore::NV("UserVectorizationFactor", UserVF)
5774              << " is unsafe, clamping to maximum safe vectorization factor "
5775              << ore::NV("VectorizationFactor", MaxSafeVF);
5776     });
5777     return MaxSafeVF;
5778   }
5779 
5780   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5781 
5782   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5784   auto MaxVectorSize =
5785       ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType));
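  // For example (illustrative), a 256-bit widest register and a 64-bit widest
  // type yield a MaxVectorSize of PowerOf2Floor(256 / 64) = 4 lanes.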
5786 
5787   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5788                     << " / " << WidestType << " bits.\n");
5789   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5790                     << WidestRegister << " bits.\n");
5791 
5792   assert(MaxVectorSize.getFixedValue() <= WidestRegister &&
5793          "Did not expect to pack so many elements"
5794          " into one vector!");
5795   if (MaxVectorSize.getFixedValue() == 0) {
5796     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5797     return ElementCount::getFixed(1);
5798   } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() &&
5799              isPowerOf2_32(ConstTripCount)) {
5800     // We need to clamp the VF to be the ConstTripCount. There is no point in
5801     // choosing a higher viable VF as done in the loop below.
5802     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5803                       << ConstTripCount << "\n");
5804     return ElementCount::getFixed(ConstTripCount);
5805   }
5806 
5807   ElementCount MaxVF = MaxVectorSize;
5808   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5809       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5810     // Collect all viable vectorization factors larger than the default MaxVF
5811     // (i.e. MaxVectorSize).
5812     SmallVector<ElementCount, 8> VFs;
5813     auto MaxVectorSizeMaxBW =
5814         ElementCount::getFixed(WidestRegister / SmallestType);
5815     for (ElementCount VS = MaxVectorSize * 2;
5816          ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2)
5817       VFs.push_back(VS);
5818 
5819     // For each VF calculate its register usage.
5820     auto RUs = calculateRegisterUsage(VFs);
5821 
5822     // Select the largest VF which doesn't require more registers than existing
5823     // ones.
5824     for (int i = RUs.size() - 1; i >= 0; --i) {
5825       bool Selected = true;
5826       for (auto &pair : RUs[i].MaxLocalUsers) {
5827         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5828         if (pair.second > TargetNumRegisters)
5829           Selected = false;
5830       }
5831       if (Selected) {
5832         MaxVF = VFs[i];
5833         break;
5834       }
5835     }
5836     if (ElementCount MinVF =
5837             TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) {
5838       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5839         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5840                           << ") with target's minimum: " << MinVF << '\n');
5841         MaxVF = MinVF;
5842       }
5843     }
5844   }
5845   return MaxVF;
5846 }
5847 
5848 VectorizationFactor
5849 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5850   // FIXME: This can be fixed for scalable vectors later, because at this stage
5851   // the LoopVectorizer will only consider vectorizing a loop with scalable
5852   // vectors when the loop has a hint to enable vectorization for a given VF.
5853   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5854 
5855   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5856   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5857   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5858 
5859   auto Width = ElementCount::getFixed(1);
5860   const float ScalarCost = *ExpectedCost.getValue();
5861   float Cost = ScalarCost;
5862 
5863   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5864   if (ForceVectorization && MaxVF.isVector()) {
5865     // Ignore scalar width, because the user explicitly wants vectorization.
5866     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5867     // evaluation.
5868     Cost = std::numeric_limits<float>::max();
5869   }
5870 
5871   for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
5872        i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
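    // For example (illustrative), a vector body costing 20 at VF=4 is compared
    // as 20 / 4 = 5 against the scalar loop cost.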
5876     VectorizationCostTy C = expectedCost(i);
5877     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5878     float VectorCost = *C.first.getValue() / (float)i.getFixedValue();
5879     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5880                       << " costs: " << (int)VectorCost << ".\n");
5881     if (!C.second && !ForceVectorization) {
5882       LLVM_DEBUG(
5883           dbgs() << "LV: Not considering vector loop of width " << i
5884                  << " because it will not generate any vector instructions.\n");
5885       continue;
5886     }
5887 
5888     // If profitable add it to ProfitableVF list.
5889     if (VectorCost < ScalarCost) {
5890       ProfitableVFs.push_back(VectorizationFactor(
5891           {i, (unsigned)VectorCost}));
5892     }
5893 
5894     if (VectorCost < Cost) {
5895       Cost = VectorCost;
5896       Width = i;
5897     }
5898   }
5899 
5900   if (!EnableCondStoresVectorization && NumPredStores) {
5901     reportVectorizationFailure("There are conditional stores.",
5902         "store that is conditionally executed prevents vectorization",
5903         "ConditionalStore", ORE, TheLoop);
5904     Width = ElementCount::getFixed(1);
5905     Cost = ScalarCost;
5906   }
5907 
5908   LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() && Cost >= ScalarCost) dbgs()
5909              << "LV: Vectorization seems to be not beneficial, "
5910              << "but was forced by a user.\n");
5911   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5912   VectorizationFactor Factor = {Width,
5913                                 (unsigned)(Width.getKnownMinValue() * Cost)};
5914   return Factor;
5915 }
5916 
5917 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5918     const Loop &L, ElementCount VF) const {
5919   // Cross iteration phis such as reductions need special handling and are
5920   // currently unsupported.
5921   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5922         return Legal->isFirstOrderRecurrence(&Phi) ||
5923                Legal->isReductionVariable(&Phi);
5924       }))
5925     return false;
5926 
5927   // Phis with uses outside of the loop require special handling and are
5928   // currently unsupported.
5929   for (auto &Entry : Legal->getInductionVars()) {
5930     // Look for uses of the value of the induction at the last iteration.
5931     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5932     for (User *U : PostInc->users())
5933       if (!L.contains(cast<Instruction>(U)))
5934         return false;
5935     // Look for uses of penultimate value of the induction.
5936     for (User *U : Entry.first->users())
5937       if (!L.contains(cast<Instruction>(U)))
5938         return false;
5939   }
5940 
5941   // Induction variables that are widened require special handling that is
5942   // currently not supported.
5943   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5944         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5945                  this->isProfitableToScalarize(Entry.first, VF));
5946       }))
5947     return false;
5948 
5949   return true;
5950 }
5951 
5952 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5953     const ElementCount VF) const {
5954   // FIXME: We need a much better cost-model to take different parameters such
5955   // as register pressure, code size increase and cost of extra branches into
5956   // account. For now we apply a very crude heuristic and only consider loops
5957   // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
5960   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5961     return false;
5962   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5963     return true;
5964   return false;
5965 }
5966 
5967 VectorizationFactor
5968 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5969     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5970   VectorizationFactor Result = VectorizationFactor::Disabled();
5971   if (!EnableEpilogueVectorization) {
5972     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5973     return Result;
5974   }
5975 
5976   if (!isScalarEpilogueAllowed()) {
5977     LLVM_DEBUG(
5978         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5979                   "allowed.\n";);
5980     return Result;
5981   }
5982 
5983   // FIXME: This can be fixed for scalable vectors later, because at this stage
5984   // the LoopVectorizer will only consider vectorizing a loop with scalable
5985   // vectors when the loop has a hint to enable vectorization for a given VF.
5986   if (MainLoopVF.isScalable()) {
5987     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
5988                          "yet supported.\n");
5989     return Result;
5990   }
5991 
5992   // Not really a cost consideration, but check for unsupported cases here to
5993   // simplify the logic.
5994   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5995     LLVM_DEBUG(
5996         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5997                   "not a supported candidate.\n";);
5998     return Result;
5999   }
6000 
6001   if (EpilogueVectorizationForceVF > 1) {
6002     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6003     if (LVP.hasPlanWithVFs(
6004             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6005       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6006     else {
6007       LLVM_DEBUG(
6008           dbgs()
6009               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6010       return Result;
6011     }
6012   }
6013 
6014   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6015       TheLoop->getHeader()->getParent()->hasMinSize()) {
6016     LLVM_DEBUG(
6017         dbgs()
6018             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6019     return Result;
6020   }
6021 
6022   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6023     return Result;
6024 
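  // Pick the most profitable VF that is smaller than the main loop VF, has
  // the lowest recorded cost, and for which a VPlan covering both VFs exists.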
6025   for (auto &NextVF : ProfitableVFs)
6026     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6027         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
6028         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6029       Result = NextVF;
6030 
6031   if (Result != VectorizationFactor::Disabled())
6032     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6033                       << Result.Width.getFixedValue() << "\n";);
6034   return Result;
6035 }
6036 
6037 std::pair<unsigned, unsigned>
6038 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6039   unsigned MinWidth = -1U;
6040   unsigned MaxWidth = 8;
6041   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6042 
6043   // For each block.
6044   for (BasicBlock *BB : TheLoop->blocks()) {
6045     // For each instruction in the loop.
6046     for (Instruction &I : BB->instructionsWithoutDebug()) {
6047       Type *T = I.getType();
6048 
6049       // Skip ignored values.
6050       if (ValuesToIgnore.count(&I))
6051         continue;
6052 
6053       // Only examine Loads, Stores and PHINodes.
6054       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6055         continue;
6056 
6057       // Examine PHI nodes that are reduction variables. Update the type to
6058       // account for the recurrence type.
6059       if (auto *PN = dyn_cast<PHINode>(&I)) {
6060         if (!Legal->isReductionVariable(PN))
6061           continue;
6062         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6063         if (PreferInLoopReductions ||
6064             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6065                                       RdxDesc.getRecurrenceType(),
6066                                       TargetTransformInfo::ReductionFlags()))
6067           continue;
6068         T = RdxDesc.getRecurrenceType();
6069       }
6070 
6071       // Examine the stored values.
6072       if (auto *ST = dyn_cast<StoreInst>(&I))
6073         T = ST->getValueOperand()->getType();
6074 
6075       // Ignore loaded pointer types and stored pointer types that are not
6076       // vectorizable.
6077       //
6078       // FIXME: The check here attempts to predict whether a load or store will
6079       //        be vectorized. We only know this for certain after a VF has
6080       //        been selected. Here, we assume that if an access can be
6081       //        vectorized, it will be. We should also look at extending this
6082       //        optimization to non-pointer types.
6083       //
6084       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6085           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6086         continue;
6087 
6088       MinWidth = std::min(MinWidth,
6089                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6090       MaxWidth = std::max(MaxWidth,
6091                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6092     }
6093   }
6094 
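  // For example, a loop that loads i8 values and stores i32 values reports
  // {MinWidth, MaxWidth} = {8, 32}.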
6095   return {MinWidth, MaxWidth};
6096 }
6097 
6098 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6099                                                            unsigned LoopCost) {
6100   // -- The interleave heuristics --
6101   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6102   // There are many micro-architectural considerations that we can't predict
6103   // at this level. For example, frontend pressure (on decode or fetch) due to
6104   // code size, or the number and capabilities of the execution ports.
6105   //
6106   // We use the following heuristics to select the interleave count:
6107   // 1. If the code has reductions, then we interleave to break the cross
6108   // iteration dependency.
6109   // 2. If the loop is really small, then we interleave to reduce the loop
6110   // overhead.
6111   // 3. We don't interleave if we think that we will spill registers to memory
6112   // due to the increased register pressure.
6113 
6114   if (!isScalarEpilogueAllowed())
6115     return 1;
6116 
  // Do not interleave if there is a finite maximum safe dependence distance;
  // that distance has already been used to limit the vectorization factor.
6118   if (Legal->getMaxSafeDepDistBytes() != -1U)
6119     return 1;
6120 
6121   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6122   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6128   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6129       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6130     return 1;
6131 
6132   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants below, so assume that we have at least one
  // instruction that uses at least one register.
6135   for (auto& pair : R.MaxLocalUsers) {
6136     pair.second = std::max(pair.second, 1U);
6137   }
6138 
6139   // We calculate the interleave count using the following formula.
6140   // Subtract the number of loop invariants from the number of available
6141   // registers. These registers are used by all of the interleaved instances.
6142   // Next, divide the remaining registers by the number of registers that is
6143   // required by the loop, in order to estimate how many parallel instances
6144   // fit without causing spills. All of this is rounded down if necessary to be
6145   // a power of two. We want power of two interleave count to simplify any
6146   // addressing operations or alignment considerations.
6147   // We also want power of two interleave counts to ensure that the induction
6148   // variable of the vector loop wraps to zero, when tail is folded by masking;
6149   // this currently happens when OptForSize, in which case IC is set to 1 above.
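  // As an illustrative example (register counts here are hypothetical, not
  // queried from any particular target): with 32 registers in a class, 2 held
  // by loop invariants and a maximum local usage of 6, the estimate is
  // PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved instances.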
6150   unsigned IC = UINT_MAX;
6151 
6152   for (auto& pair : R.MaxLocalUsers) {
6153     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6157     if (VF.isScalar()) {
6158       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6159         TargetNumRegisters = ForceTargetNumScalarRegs;
6160     } else {
6161       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6162         TargetNumRegisters = ForceTargetNumVectorRegs;
6163     }
6164     unsigned MaxLocalUsers = pair.second;
6165     unsigned LoopInvariantRegs = 0;
6166     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6167       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6168 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
6170     // Don't count the induction variable as interleaved.
6171     if (EnableIndVarRegisterHeur) {
6172       TmpIC =
6173           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6174                         std::max(1U, (MaxLocalUsers - 1)));
6175     }
6176 
6177     IC = std::min(IC, TmpIC);
6178   }
6179 
6180   // Clamp the interleave ranges to reasonable counts.
6181   unsigned MaxInterleaveCount =
6182       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6183 
6184   // Check if the user has overridden the max.
6185   if (VF.isScalar()) {
6186     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6187       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6188   } else {
6189     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6190       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6191   }
6192 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
6203   if (BestKnownTC) {
6204     MaxInterleaveCount =
6205         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6206     // Make sure MaxInterleaveCount is greater than 0.
6207     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6208   }
6209 
6210   assert(MaxInterleaveCount > 0 &&
6211          "Maximum interleave count must be greater than 0");
6212 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
6215   if (IC > MaxInterleaveCount)
6216     IC = MaxInterleaveCount;
6217   else
6218     // Make sure IC is greater than 0.
6219     IC = std::max(1u, IC);
6220 
6221   assert(IC > 0 && "Interleave count must be greater than 0.");
6222 
6223   // If we did not calculate the cost for VF (because the user selected the VF)
6224   // then we calculate the cost of VF here.
6225   if (LoopCost == 0) {
6226     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6227     LoopCost = *expectedCost(VF).first.getValue();
6228   }
6229 
6230   assert(LoopCost && "Non-zero loop cost expected");
6231 
6232   // Interleave if we vectorized this loop and there is a reduction that could
6233   // benefit from interleaving.
6234   if (VF.isVector() && HasReductions) {
6235     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6236     return IC;
6237   }
6238 
6239   // Note that if we've already vectorized the loop we will have done the
6240   // runtime check and so interleaving won't require further checks.
6241   bool InterleavingRequiresRuntimePointerCheck =
6242       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6243 
6244   // We want to interleave small loops in order to reduce the loop overhead and
6245   // potentially expose ILP opportunities.
6246   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6247                     << "LV: IC is " << IC << '\n'
6248                     << "LV: VF is " << VF << '\n');
6249   const bool AggressivelyInterleaveReductions =
6250       TTI.enableAggressiveInterleaving(HasReductions);
6251   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead cost is 1, and we use the cost model
    // to estimate the cost of the loop; we then interleave until the loop
    // overhead is about 5% of the cost of the loop.
6255     unsigned SmallIC =
6256         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
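    // For instance (hypothetical costs): with SmallLoopCost = 20 and an
    // estimated LoopCost of 3, SmallIC = min(IC, PowerOf2Floor(20 / 3)) =
    // min(IC, 4).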
6257 
6258     // Interleave until store/load ports (estimated by max interleave count) are
6259     // saturated.
6260     unsigned NumStores = Legal->getNumStores();
6261     unsigned NumLoads = Legal->getNumLoads();
6262     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6263     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6264 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2 so
    // that the critical path only gets increased by one reduction operation.
6269     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6270       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6271       SmallIC = std::min(SmallIC, F);
6272       StoresIC = std::min(StoresIC, F);
6273       LoadsIC = std::min(LoadsIC, F);
6274     }
6275 
6276     if (EnableLoadStoreRuntimeInterleave &&
6277         std::max(StoresIC, LoadsIC) > SmallIC) {
6278       LLVM_DEBUG(
6279           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6280       return std::max(StoresIC, LoadsIC);
6281     }
6282 
6283     // If there are scalar reductions and TTI has enabled aggressive
6284     // interleaving for reductions, we will interleave to expose ILP.
6285     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6286         AggressivelyInterleaveReductions) {
6287       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6288       // Interleave no less than SmallIC but not as aggressive as the normal IC
6289       // to satisfy the rare situation when resources are too limited.
6290       return std::max(IC / 2, SmallIC);
6291     } else {
6292       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6293       return SmallIC;
6294     }
6295   }
6296 
6297   // Interleave if this is a large loop (small loops are already dealt with by
6298   // this point) that could benefit from interleaving.
6299   if (AggressivelyInterleaveReductions) {
6300     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6301     return IC;
6302   }
6303 
6304   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6305   return 1;
6306 }
6307 
6308 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6309 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take additional registers.
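  // For example (an illustrative sketch): in a straight-line block
  //   a = ...; b = ...; c = a + b; d = c + 1
  // the intervals of 'a' and 'b' both end at 'c', so at most two in-loop
  // values are open at any point, giving a maximum register usage of 2.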
6327   LoopBlocksDFS DFS(TheLoop);
6328   DFS.perform(LI);
6329 
6330   RegisterUsage RU;
6331 
6332   // Each 'key' in the map opens a new interval. The values
6333   // of the map are the index of the 'last seen' usage of the
6334   // instruction that is the key.
6335   using IntervalMap = DenseMap<Instruction *, unsigned>;
6336 
6337   // Maps instruction to its index.
6338   SmallVector<Instruction *, 64> IdxToInstr;
6339   // Marks the end of each interval.
6340   IntervalMap EndPoint;
  // Saves the instructions that are used in the loop.
6342   SmallPtrSet<Instruction *, 8> Ends;
6343   // Saves the list of values that are used in the loop but are
6344   // defined outside the loop, such as arguments and constants.
6345   SmallPtrSet<Value *, 8> LoopInvariants;
6346 
6347   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6348     for (Instruction &I : BB->instructionsWithoutDebug()) {
6349       IdxToInstr.push_back(&I);
6350 
6351       // Save the end location of each USE.
6352       for (Value *U : I.operands()) {
6353         auto *Instr = dyn_cast<Instruction>(U);
6354 
6355         // Ignore non-instruction values such as arguments, constants, etc.
6356         if (!Instr)
6357           continue;
6358 
6359         // If this instruction is outside the loop then record it and continue.
6360         if (!TheLoop->contains(Instr)) {
6361           LoopInvariants.insert(Instr);
6362           continue;
6363         }
6364 
6365         // Overwrite previous end points.
6366         EndPoint[Instr] = IdxToInstr.size();
6367         Ends.insert(Instr);
6368       }
6369     }
6370   }
6371 
6372   // Saves the list of intervals that end with the index in 'key'.
6373   using InstrList = SmallVector<Instruction *, 2>;
6374   DenseMap<unsigned, InstrList> TransposeEnds;
6375 
6376   // Transpose the EndPoints to a list of values that end at each index.
6377   for (auto &Interval : EndPoint)
6378     TransposeEnds[Interval.second].push_back(Interval.first);
6379 
6380   SmallPtrSet<Instruction *, 8> OpenIntervals;
6381   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6382   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6383 
6384   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6385 
6386   // A lambda that gets the register usage for the given type and VF.
6387   const auto &TTICapture = TTI;
6388   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6389     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6390       return 0U;
6391     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6392   };
6393 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6395     Instruction *I = IdxToInstr[i];
6396 
6397     // Remove all of the instructions that end at this location.
6398     InstrList &List = TransposeEnds[i];
6399     for (Instruction *ToRemove : List)
6400       OpenIntervals.erase(ToRemove);
6401 
6402     // Ignore instructions that are never used within the loop.
6403     if (!Ends.count(I))
6404       continue;
6405 
6406     // Skip ignored values.
6407     if (ValuesToIgnore.count(I))
6408       continue;
6409 
6410     // For each VF find the maximum usage of registers.
6411     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6412       // Count the number of live intervals.
6413       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6414 
6415       if (VFs[j].isScalar()) {
6416         for (auto Inst : OpenIntervals) {
6417           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6418           if (RegUsage.find(ClassID) == RegUsage.end())
6419             RegUsage[ClassID] = 1;
6420           else
6421             RegUsage[ClassID] += 1;
6422         }
6423       } else {
6424         collectUniformsAndScalars(VFs[j]);
6425         for (auto Inst : OpenIntervals) {
6426           // Skip ignored values for VF > 1.
6427           if (VecValuesToIgnore.count(Inst))
6428             continue;
6429           if (isScalarAfterVectorization(Inst, VFs[j])) {
6430             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6431             if (RegUsage.find(ClassID) == RegUsage.end())
6432               RegUsage[ClassID] = 1;
6433             else
6434               RegUsage[ClassID] += 1;
6435           } else {
6436             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6437             if (RegUsage.find(ClassID) == RegUsage.end())
6438               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6439             else
6440               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6441           }
6442         }
6443       }
6444 
6445       for (auto& pair : RegUsage) {
6446         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6447           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6448         else
6449           MaxUsages[j][pair.first] = pair.second;
6450       }
6451     }
6452 
6453     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6454                       << OpenIntervals.size() << '\n');
6455 
6456     // Add the current instruction to the list of open intervals.
6457     OpenIntervals.insert(I);
6458   }
6459 
6460   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6461     SmallMapVector<unsigned, unsigned, 4> Invariant;
6462 
6463     for (auto Inst : LoopInvariants) {
6464       unsigned Usage =
6465           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6466       unsigned ClassID =
6467           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6468       if (Invariant.find(ClassID) == Invariant.end())
6469         Invariant[ClassID] = Usage;
6470       else
6471         Invariant[ClassID] += Usage;
6472     }
6473 
6474     LLVM_DEBUG({
6475       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6476       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6477              << " item\n";
6478       for (const auto &pair : MaxUsages[i]) {
6479         dbgs() << "LV(REG): RegisterClass: "
6480                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6481                << " registers\n";
6482       }
6483       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6484              << " item\n";
6485       for (const auto &pair : Invariant) {
6486         dbgs() << "LV(REG): RegisterClass: "
6487                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6488                << " registers\n";
6489       }
6490     });
6491 
6492     RU.LoopInvariantRegs = Invariant;
6493     RU.MaxLocalUsers = MaxUsages[i];
6494     RUs[i] = RU;
6495   }
6496 
6497   return RUs;
6498 }
6499 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: The cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked Load/Gather emulation was previously never allowed.
  // A limited amount of Masked Store/Scatter emulation was allowed.
6509   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6510   return isa<LoadInst>(I) ||
6511          (isa<StoreInst>(I) &&
6512           NumPredStores > NumberOfStoresToPredicate);
6513 }
6514 
6515 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6516   // If we aren't vectorizing the loop, or if we've already collected the
6517   // instructions to scalarize, there's nothing to do. Collection may already
6518   // have occurred if we have a user-selected VF and are now computing the
6519   // expected cost for interleaving.
6520   if (VF.isScalar() || VF.isZero() ||
6521       InstsToScalarize.find(VF) != InstsToScalarize.end())
6522     return;
6523 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6525   // not profitable to scalarize any instructions, the presence of VF in the
6526   // map will indicate that we've analyzed it already.
6527   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6528 
6529   // Find all the instructions that are scalar with predication in the loop and
6530   // determine if it would be better to not if-convert the blocks they are in.
6531   // If so, we also record the instructions to scalarize.
6532   for (BasicBlock *BB : TheLoop->blocks()) {
6533     if (!blockNeedsPredication(BB))
6534       continue;
6535     for (Instruction &I : *BB)
6536       if (isScalarWithPredication(&I)) {
6537         ScalarCostsTy ScalarCosts;
        // Do not apply the discount logic if the hacked cost is needed
        // for emulated masked memrefs.
6540         if (!useEmulatedMaskMemRefHack(&I) &&
6541             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6542           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6543         // Remember that BB will remain after vectorization.
6544         PredicatedBBsAfterVectorization.insert(BB);
6545       }
6546   }
6547 }
6548 
6549 int LoopVectorizationCostModel::computePredInstDiscount(
6550     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6551   assert(!isUniformAfterVectorization(PredInst, VF) &&
6552          "Instruction marked uniform-after-vectorization will be predicated");
6553 
6554   // Initialize the discount to zero, meaning that the scalar version and the
6555   // vector version cost the same.
6556   InstructionCost Discount = 0;
6557 
6558   // Holds instructions to analyze. The instructions we visit are mapped in
6559   // ScalarCosts. Those instructions are the ones that would be scalarized if
6560   // we find that the scalar version costs less.
6561   SmallVector<Instruction *, 8> Worklist;
6562 
6563   // Returns true if the given instruction can be scalarized.
6564   auto canBeScalarized = [&](Instruction *I) -> bool {
6565     // We only attempt to scalarize instructions forming a single-use chain
6566     // from the original predicated block that would otherwise be vectorized.
6567     // Although not strictly necessary, we give up on instructions we know will
6568     // already be scalar to avoid traversing chains that are unlikely to be
6569     // beneficial.
6570     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6571         isScalarAfterVectorization(I, VF))
6572       return false;
6573 
6574     // If the instruction is scalar with predication, it will be analyzed
6575     // separately. We ignore it within the context of PredInst.
6576     if (isScalarWithPredication(I))
6577       return false;
6578 
6579     // If any of the instruction's operands are uniform after vectorization,
6580     // the instruction cannot be scalarized. This prevents, for example, a
6581     // masked load from being scalarized.
6582     //
6583     // We assume we will only emit a value for lane zero of an instruction
6584     // marked uniform after vectorization, rather than VF identical values.
6585     // Thus, if we scalarize an instruction that uses a uniform, we would
6586     // create uses of values corresponding to the lanes we aren't emitting code
6587     // for. This behavior can be changed by allowing getScalarValue to clone
6588     // the lane zero values for uniforms rather than asserting.
6589     for (Use &U : I->operands())
6590       if (auto *J = dyn_cast<Instruction>(U.get()))
6591         if (isUniformAfterVectorization(J, VF))
6592           return false;
6593 
6594     // Otherwise, we can scalarize the instruction.
6595     return true;
6596   };
6597 
6598   // Compute the expected cost discount from scalarizing the entire expression
6599   // feeding the predicated instruction. We currently only consider expressions
6600   // that are single-use instruction chains.
6601   Worklist.push_back(PredInst);
6602   while (!Worklist.empty()) {
6603     Instruction *I = Worklist.pop_back_val();
6604 
6605     // If we've already analyzed the instruction, there's nothing to do.
6606     if (ScalarCosts.find(I) != ScalarCosts.end())
6607       continue;
6608 
6609     // Compute the cost of the vector instruction. Note that this cost already
6610     // includes the scalarization overhead of the predicated instruction.
6611     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6612 
6613     // Compute the cost of the scalarized instruction. This cost is the cost of
6614     // the instruction as if it wasn't if-converted and instead remained in the
6615     // predicated block. We will scale this cost by block probability after
6616     // computing the scalarization overhead.
6617     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6618     InstructionCost ScalarCost =
6619         VF.getKnownMinValue() *
6620         getInstructionCost(I, ElementCount::getFixed(1)).first;
6621 
6622     // Compute the scalarization overhead of needed insertelement instructions
6623     // and phi nodes.
6624     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6625       ScalarCost += TTI.getScalarizationOverhead(
6626           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6627           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6628       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6629       ScalarCost +=
6630           VF.getKnownMinValue() *
6631           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6632     }
6633 
6634     // Compute the scalarization overhead of needed extractelement
6635     // instructions. For each of the instruction's operands, if the operand can
6636     // be scalarized, add it to the worklist; otherwise, account for the
6637     // overhead.
6638     for (Use &U : I->operands())
6639       if (auto *J = dyn_cast<Instruction>(U.get())) {
6640         assert(VectorType::isValidElementType(J->getType()) &&
6641                "Instruction has non-scalar type");
6642         if (canBeScalarized(J))
6643           Worklist.push_back(J);
6644         else if (needsExtract(J, VF)) {
6645           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6646           ScalarCost += TTI.getScalarizationOverhead(
6647               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6648               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6649         }
6650       }
6651 
6652     // Scale the total scalar cost by block probability.
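    // getReciprocalPredBlockProb() models the assumption that a predicated
    // block executes on about half of the iterations (it currently returns
    // 2), so the scalar cost computed above is roughly halved below.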
6653     ScalarCost /= getReciprocalPredBlockProb();
6654 
6655     // Compute the discount. A non-negative discount means the vector version
6656     // of the instruction costs more, and scalarizing would be beneficial.
6657     Discount += VectorCost - ScalarCost;
6658     ScalarCosts[I] = ScalarCost;
6659   }
6660 
6661   return *Discount.getValue();
6662 }
6663 
6664 LoopVectorizationCostModel::VectorizationCostTy
6665 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6666   VectorizationCostTy Cost;
6667 
6668   // For each block.
6669   for (BasicBlock *BB : TheLoop->blocks()) {
6670     VectorizationCostTy BlockCost;
6671 
6672     // For each instruction in the old loop.
6673     for (Instruction &I : BB->instructionsWithoutDebug()) {
6674       // Skip ignored values.
6675       if (ValuesToIgnore.count(&I) ||
6676           (VF.isVector() && VecValuesToIgnore.count(&I)))
6677         continue;
6678 
6679       VectorizationCostTy C = getInstructionCost(&I, VF);
6680 
6681       // Check if we should override the cost.
6682       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6683         C.first = InstructionCost(ForceTargetInstructionCost);
6684 
6685       BlockCost.first += C.first;
6686       BlockCost.second |= C.second;
6687       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6688                         << " for VF " << VF << " For instruction: " << I
6689                         << '\n');
6690     }
6691 
6692     // If we are vectorizing a predicated block, it will have been
6693     // if-converted. This means that the block's instructions (aside from
6694     // stores and instructions that may divide by zero) will now be
6695     // unconditionally executed. For the scalar case, we may not always execute
6696     // the predicated block, if it is an if-else block. Thus, scale the block's
6697     // cost by the probability of executing it. blockNeedsPredication from
6698     // Legal is used so as to not include all blocks in tail folded loops.
6699     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6700       BlockCost.first /= getReciprocalPredBlockProb();
6701 
6702     Cost.first += BlockCost.first;
6703     Cost.second |= BlockCost.second;
6704   }
6705 
6706   return Cost;
6707 }
6708 
6709 /// Gets Address Access SCEV after verifying that the access pattern
6710 /// is loop invariant except the induction variable dependence.
6711 ///
6712 /// This SCEV can be sent to the Target in order to estimate the address
6713 /// calculation cost.
6714 static const SCEV *getAddressAccessSCEV(
6715               Value *Ptr,
6716               LoopVectorizationLegality *Legal,
6717               PredicatedScalarEvolution &PSE,
6718               const Loop *TheLoop) {
6719 
6720   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6721   if (!Gep)
6722     return nullptr;
6723 
6724   // We are looking for a gep with all loop invariant indices except for one
6725   // which should be an induction variable.
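  // An illustrative example of the pattern we accept (all names made up):
  //   %gep = getelementptr inbounds [16 x i32], [16 x i32]* %base,
  //                                 i64 %loop_invariant, i64 %induction_var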
6726   auto SE = PSE.getSE();
6727   unsigned NumOperands = Gep->getNumOperands();
6728   for (unsigned i = 1; i < NumOperands; ++i) {
6729     Value *Opd = Gep->getOperand(i);
6730     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6731         !Legal->isInductionVariable(Opd))
6732       return nullptr;
6733   }
6734 
  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return the
  // pointer SCEV.
6736   return PSE.getSCEV(Ptr);
6737 }
6738 
6739 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6740   return Legal->hasStride(I->getOperand(0)) ||
6741          Legal->hasStride(I->getOperand(1));
6742 }
6743 
6744 InstructionCost
6745 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6746                                                         ElementCount VF) {
6747   assert(VF.isVector() &&
6748          "Scalarization cost of instruction implies vectorization.");
6749   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6750   Type *ValTy = getMemInstValueType(I);
6751   auto SE = PSE.getSE();
6752 
6753   unsigned AS = getLoadStoreAddressSpace(I);
6754   Value *Ptr = getLoadStorePointerOperand(I);
6755   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6756 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6759   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6760 
6761   // Get the cost of the scalar memory instruction and address computation.
6762   InstructionCost Cost =
6763       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6764 
6765   // Don't pass *I here, since it is scalar but will actually be part of a
6766   // vectorized loop where the user of it is a vectorized instruction.
6767   const Align Alignment = getLoadStoreAlignment(I);
6768   Cost += VF.getKnownMinValue() *
6769           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6770                               AS, TTI::TCK_RecipThroughput);
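  // At this point Cost models VF scalar address computations plus VF scalar
  // memory operations; the insertelement/extractelement overhead of moving
  // values between vector and scalar form is added next.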
6771 
6772   // Get the overhead of the extractelement and insertelement instructions
6773   // we might create due to scalarization.
6774   Cost += getScalarizationOverhead(I, VF);
6775 
6776   // If we have a predicated store, it may not be executed for each vector
6777   // lane. Scale the cost by the probability of executing the predicated
6778   // block.
6779   if (isPredicatedInst(I)) {
6780     Cost /= getReciprocalPredBlockProb();
6781 
6782     if (useEmulatedMaskMemRefHack(I))
6783       // Artificially setting to a high enough value to practically disable
6784       // vectorization with such operations.
6785       Cost = 3000000;
6786   }
6787 
6788   return Cost;
6789 }
6790 
6791 InstructionCost
6792 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6793                                                     ElementCount VF) {
6794   Type *ValTy = getMemInstValueType(I);
6795   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6796   Value *Ptr = getLoadStorePointerOperand(I);
6797   unsigned AS = getLoadStoreAddressSpace(I);
6798   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6799   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6800 
6801   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6802          "Stride should be 1 or -1 for consecutive memory access");
6803   const Align Alignment = getLoadStoreAlignment(I);
6804   InstructionCost Cost = 0;
6805   if (Legal->isMaskRequired(I))
6806     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6807                                       CostKind);
6808   else
6809     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6810                                 CostKind, I);
6811 
6812   bool Reverse = ConsecutiveStride < 0;
6813   if (Reverse)
6814     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6815   return Cost;
6816 }
6817 
6818 InstructionCost
6819 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6820                                                 ElementCount VF) {
6821   assert(Legal->isUniformMemOp(*I));
6822 
6823   Type *ValTy = getMemInstValueType(I);
6824   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6825   const Align Alignment = getLoadStoreAlignment(I);
6826   unsigned AS = getLoadStoreAddressSpace(I);
6827   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6828   if (isa<LoadInst>(I)) {
6829     return TTI.getAddressComputationCost(ValTy) +
6830            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6831                                CostKind) +
6832            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6833   }
6834   StoreInst *SI = cast<StoreInst>(I);
6835 
6836   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
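  // A uniform store is kept as a single scalar store. If the stored value is
  // loop-invariant no extract is needed; otherwise the value of the last
  // vector lane (VF - 1) is extracted and stored.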
6837   return TTI.getAddressComputationCost(ValTy) +
6838          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6839                              CostKind) +
6840          (isLoopInvariantStoreValue
6841               ? 0
6842               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6843                                        VF.getKnownMinValue() - 1));
6844 }
6845 
6846 InstructionCost
6847 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6848                                                  ElementCount VF) {
6849   Type *ValTy = getMemInstValueType(I);
6850   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6851   const Align Alignment = getLoadStoreAlignment(I);
6852   const Value *Ptr = getLoadStorePointerOperand(I);
6853 
6854   return TTI.getAddressComputationCost(VectorTy) +
6855          TTI.getGatherScatterOpCost(
6856              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6857              TargetTransformInfo::TCK_RecipThroughput, I);
6858 }
6859 
6860 InstructionCost
6861 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6862                                                    ElementCount VF) {
6863   // TODO: Once we have support for interleaving with scalable vectors
6864   // we can calculate the cost properly here.
6865   if (VF.isScalable())
6866     return InstructionCost::getInvalid();
6867 
6868   Type *ValTy = getMemInstValueType(I);
6869   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6870   unsigned AS = getLoadStoreAddressSpace(I);
6871 
6872   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6874 
6875   unsigned InterleaveFactor = Group->getFactor();
6876   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
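  // For example, with VF = 4 and an interleave factor of 2, WideVecTy is an
  // 8-element vector that covers one wide access for the whole group.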
6877 
6878   // Holds the indices of existing members in an interleaved load group.
6879   // An interleaved store group doesn't need this as it doesn't allow gaps.
6880   SmallVector<unsigned, 4> Indices;
6881   if (isa<LoadInst>(I)) {
6882     for (unsigned i = 0; i < InterleaveFactor; i++)
6883       if (Group->getMember(i))
6884         Indices.push_back(i);
6885   }
6886 
6887   // Calculate the cost of the whole interleaved group.
6888   bool UseMaskForGaps =
6889       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6890   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6891       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6892       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6893 
6894   if (Group->isReverse()) {
6895     // TODO: Add support for reversed masked interleaved access.
6896     assert(!Legal->isMaskRequired(I) &&
6897            "Reverse masked interleaved access not supported.");
6898     Cost += Group->getNumMembers() *
6899             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6900   }
6901   return Cost;
6902 }
6903 
6904 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6905     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6906   // Early exit for no inloop reductions
6907   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6908     return InstructionCost::getInvalid();
6909   auto *VectorTy = cast<VectorType>(Ty);
6910 
  // We are looking for a pattern of one of the following forms, and finding
  // the minimal acceptable cost:
6912   //  reduce(mul(ext(A), ext(B))) or
6913   //  reduce(mul(A, B)) or
6914   //  reduce(ext(A)) or
6915   //  reduce(A).
6916   // The basic idea is that we walk down the tree to do that, finding the root
6917   // reduction instruction in InLoopReductionImmediateChains. From there we find
6918   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6919   // of the components. If the reduction cost is lower then we return it for the
6920   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
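  // As a concrete illustration (the types here are only an example): for a
  // loop accumulating "Sum += (i32)(i16)A[i] * (i32)(i16)B[i]", the
  // reduce(mul(ext(A), ext(B))) form may be cheaper as a single extended
  // multiply-accumulate reduction than as separate ext, mul and reduce steps.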
6923   Instruction *RetI = I;
6924   if ((RetI->getOpcode() == Instruction::SExt ||
6925        RetI->getOpcode() == Instruction::ZExt)) {
6926     if (!RetI->hasOneUser())
6927       return InstructionCost::getInvalid();
6928     RetI = RetI->user_back();
6929   }
6930   if (RetI->getOpcode() == Instruction::Mul &&
6931       RetI->user_back()->getOpcode() == Instruction::Add) {
6932     if (!RetI->hasOneUser())
6933       return InstructionCost::getInvalid();
6934     RetI = RetI->user_back();
6935   }
6936 
6937   // Test if the found instruction is a reduction, and if not return an invalid
6938   // cost specifying the parent to use the original cost modelling.
6939   if (!InLoopReductionImmediateChains.count(RetI))
6940     return InstructionCost::getInvalid();
6941 
6942   // Find the reduction this chain is a part of and calculate the basic cost of
6943   // the reduction on its own.
6944   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6945   Instruction *ReductionPhi = LastChain;
6946   while (!isa<PHINode>(ReductionPhi))
6947     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6948 
6949   RecurrenceDescriptor RdxDesc =
6950       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6951   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6952                                                      VectorTy, false, CostKind);
6953 
  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
6956   Instruction *RedOp = RetI->getOperand(1) == LastChain
6957                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6958                            : dyn_cast<Instruction>(RetI->getOperand(1));
6959 
6960   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6961 
6962   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
6963       !TheLoop->isLoopInvariant(RedOp)) {
6964     bool IsUnsigned = isa<ZExtInst>(RedOp);
6965     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6966     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6967         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6968         CostKind);
6969 
6970     unsigned ExtCost =
6971         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6972                              TTI::CastContextHint::None, CostKind, RedOp);
6973     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6974       return I == RetI ? *RedCost.getValue() : 0;
6975   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
6976     Instruction *Mul = RedOp;
6977     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
6978     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
6979     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
6980         Op0->getOpcode() == Op1->getOpcode() &&
6981         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6982         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6983       bool IsUnsigned = isa<ZExtInst>(Op0);
6984       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6985       // reduce(mul(ext, ext))
6986       unsigned ExtCost =
6987           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
6988                                TTI::CastContextHint::None, CostKind, Op0);
6989       unsigned MulCost =
6990           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
6991 
6992       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6993           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6994           CostKind);
6995 
6996       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
6997         return I == RetI ? *RedCost.getValue() : 0;
6998     } else {
6999       unsigned MulCost =
7000           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7001 
7002       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7003           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7004           CostKind);
7005 
7006       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7007         return I == RetI ? *RedCost.getValue() : 0;
7008     }
7009   }
7010 
7011   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7012 }
7013 
7014 InstructionCost
7015 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7016                                                      ElementCount VF) {
7017   // Calculate scalar cost only. Vectorization cost should be ready at this
7018   // moment.
7019   if (VF.isScalar()) {
7020     Type *ValTy = getMemInstValueType(I);
7021     const Align Alignment = getLoadStoreAlignment(I);
7022     unsigned AS = getLoadStoreAddressSpace(I);
7023 
7024     return TTI.getAddressComputationCost(ValTy) +
7025            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7026                                TTI::TCK_RecipThroughput, I);
7027   }
7028   return getWideningCost(I, VF);
7029 }
7030 
7031 LoopVectorizationCostModel::VectorizationCostTy
7032 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7033                                                ElementCount VF) {
7034   // If we know that this instruction will remain uniform, check the cost of
7035   // the scalar version.
7036   if (isUniformAfterVectorization(I, VF))
7037     VF = ElementCount::getFixed(1);
7038 
7039   if (VF.isVector() && isProfitableToScalarize(I, VF))
7040     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7041 
7042   // Forced scalars do not have any scalarization overhead.
7043   auto ForcedScalar = ForcedScalars.find(VF);
7044   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7045     auto InstSet = ForcedScalar->second;
7046     if (InstSet.count(I))
7047       return VectorizationCostTy(
7048           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7049            VF.getKnownMinValue()),
7050           false);
7051   }
7052 
7053   Type *VectorTy;
7054   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7055 
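  // TypeNotScalarized records whether this instruction is expected to produce
  // genuine vector code: the vector type must split into fewer target parts
  // than there are lanes; otherwise each lane ends up in its own part and the
  // instruction is effectively scalarized.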
7056   bool TypeNotScalarized =
7057       VF.isVector() && VectorTy->isVectorTy() &&
7058       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7059   return VectorizationCostTy(C, TypeNotScalarized);
7060 }
7061 
7062 InstructionCost
7063 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7064                                                      ElementCount VF) {
7065 
7066   if (VF.isScalable())
7067     return InstructionCost::getInvalid();
7068 
7069   if (VF.isScalar())
7070     return 0;
7071 
7072   InstructionCost Cost = 0;
7073   Type *RetTy = ToVectorTy(I->getType(), VF);
7074   if (!RetTy->isVoidTy() &&
7075       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7076     Cost += TTI.getScalarizationOverhead(
7077         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7078         true, false);
7079 
7080   // Some targets keep addresses scalar.
7081   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7082     return Cost;
7083 
7084   // Some targets support efficient element stores.
7085   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7086     return Cost;
7087 
7088   // Collect operands to consider.
7089   CallInst *CI = dyn_cast<CallInst>(I);
7090   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7091 
7092   // Skip operands that do not require extraction/scalarization and do not incur
7093   // any overhead.
7094   return Cost + TTI.getOperandsScalarizationOverhead(
7095                     filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
7096 }
7097 
7098 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7099   if (VF.isScalar())
7100     return;
7101   NumPredStores = 0;
7102   for (BasicBlock *BB : TheLoop->blocks()) {
7103     // For each instruction in the old loop.
7104     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7106       if (!Ptr)
7107         continue;
7108 
7109       // TODO: We should generate better code and update the cost model for
7110       // predicated uniform stores. Today they are treated as any other
7111       // predicated store (see added test cases in
7112       // invariant-store-vectorization.ll).
7113       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7114         NumPredStores++;
7115 
7116       if (Legal->isUniformMemOp(I)) {
7117         // TODO: Avoid replicating loads and stores instead of
7118         // relying on instcombine to remove them.
7119         // Load: Scalar load + broadcast
7120         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7121         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7122         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7123         continue;
7124       }
7125 
7126       // We assume that widening is the best solution when possible.
7127       if (memoryInstructionCanBeWidened(&I, VF)) {
7128         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7129         int ConsecutiveStride =
7130                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7131         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7132                "Expected consecutive stride.");
7133         InstWidening Decision =
7134             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7135         setWideningDecision(&I, VF, Decision, Cost);
7136         continue;
7137       }
7138 
7139       // Choose between Interleaving, Gather/Scatter or Scalarization.
7140       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7141       unsigned NumAccesses = 1;
7142       if (isAccessInterleaved(&I)) {
7143         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7145 
7146         // Make one decision for the whole group.
7147         if (getWideningDecision(&I, VF) != CM_Unknown)
7148           continue;
7149 
7150         NumAccesses = Group->getNumMembers();
7151         if (interleavedAccessCanBeWidened(&I, VF))
7152           InterleaveCost = getInterleaveGroupCost(&I, VF);
7153       }
7154 
7155       InstructionCost GatherScatterCost =
7156           isLegalGatherOrScatter(&I)
7157               ? getGatherScatterCost(&I, VF) * NumAccesses
7158               : InstructionCost::getInvalid();
7159 
7160       InstructionCost ScalarizationCost =
7161           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
7162                            : InstructionCost::getInvalid();
7163 
      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
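      // Ties between interleaving and gather/scatter are resolved in favor of
      // interleaving; scalarization is the fallback when neither of the other
      // two strategies is cheaper.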
7166       InstructionCost Cost;
7167       InstWidening Decision;
7168       if (InterleaveCost <= GatherScatterCost &&
7169           InterleaveCost < ScalarizationCost) {
7170         Decision = CM_Interleave;
7171         Cost = InterleaveCost;
7172       } else if (GatherScatterCost < ScalarizationCost) {
7173         Decision = CM_GatherScatter;
7174         Cost = GatherScatterCost;
7175       } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarize for scalable vectors");
7178         Decision = CM_Scalarize;
7179         Cost = ScalarizationCost;
7180       }
      // If the instruction belongs to an interleave group, the whole group
7182       // receives the same decision. The whole group receives the cost, but
7183       // the cost will actually be assigned to one instruction.
7184       if (auto Group = getInterleavedAccessGroup(&I))
7185         setWideningDecision(Group, VF, Decision, Cost);
7186       else
7187         setWideningDecision(&I, VF, Decision, Cost);
7188     }
7189   }
7190 
  // Make sure that any load of an address and any other address computation
7192   // remains scalar unless there is gather/scatter support. This avoids
7193   // inevitable extracts into address registers, and also has the benefit of
7194   // activating LSR more, since that pass can't optimize vectorized
7195   // addresses.
7196   if (TTI.prefersVectorizedAddressing())
7197     return;
7198 
7199   // Start with all scalar pointer uses.
7200   SmallPtrSet<Instruction *, 8> AddrDefs;
7201   for (BasicBlock *BB : TheLoop->blocks())
7202     for (Instruction &I : *BB) {
7203       Instruction *PtrDef =
7204         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7205       if (PtrDef && TheLoop->contains(PtrDef) &&
7206           getWideningDecision(&I, VF) != CM_GatherScatter)
7207         AddrDefs.insert(PtrDef);
7208     }
7209 
7210   // Add all instructions used to generate the addresses.
7211   SmallVector<Instruction *, 4> Worklist;
7212   append_range(Worklist, AddrDefs);
7213   while (!Worklist.empty()) {
7214     Instruction *I = Worklist.pop_back_val();
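    // Only follow operands defined in the same block and skip PHIs, so the
    // walk stays within the straight-line address computation.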
7215     for (auto &Op : I->operands())
7216       if (auto *InstOp = dyn_cast<Instruction>(Op))
7217         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7218             AddrDefs.insert(InstOp).second)
7219           Worklist.push_back(InstOp);
7220   }
7221 
7222   for (auto *I : AddrDefs) {
7223     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since that involves finding out whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here, where we know this is the case.
7228       InstWidening Decision = getWideningDecision(I, VF);
7229       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7230         // Scalarize a widened load of address.
7231         setWideningDecision(
7232             I, VF, CM_Scalarize,
7233             (VF.getKnownMinValue() *
7234              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7235       else if (auto Group = getInterleavedAccessGroup(I)) {
7236         // Scalarize an interleave group of address loads.
7237         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7238           if (Instruction *Member = Group->getMember(I))
7239             setWideningDecision(
7240                 Member, VF, CM_Scalarize,
7241                 (VF.getKnownMinValue() *
7242                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7243         }
7244       }
7245     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7248       ForcedScalars[VF].insert(I);
7249   }
7250 }
7251 
7252 InstructionCost
7253 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7254                                                Type *&VectorTy) {
7255   Type *RetTy = I->getType();
7256   if (canTruncateToMinimalBitwidth(I, VF))
7257     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7258   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7259   auto SE = PSE.getSE();
7260   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7261 
7262   // TODO: We need to estimate the cost of intrinsic calls.
7263   switch (I->getOpcode()) {
7264   case Instruction::GetElementPtr:
7265     // We mark this instruction as zero-cost because the cost of GEPs in
7266     // vectorized code depends on whether the corresponding memory instruction
7267     // is scalarized or not. Therefore, we handle GEPs with the memory
7268     // instruction cost.
7269     return 0;
7270   case Instruction::Br: {
7271     // In cases of scalarized and predicated instructions, there will be VF
7272     // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7274     bool ScalarPredicatedBB = false;
7275     BranchInst *BI = cast<BranchInst>(I);
7276     if (VF.isVector() && BI->isConditional() &&
7277         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7278          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7279       ScalarPredicatedBB = true;
7280 
7281     if (ScalarPredicatedBB) {
7282       // Return cost for branches around scalarized and predicated blocks.
7283       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7284       auto *Vec_i1Ty =
7285           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7286       return (TTI.getScalarizationOverhead(
7287                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7288                   false, true) +
7289               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7290                VF.getKnownMinValue()));
7291     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7292       // The back-edge branch will remain, as will all scalar branches.
7293       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7294     else
7295       // This branch will be eliminated by if-conversion.
7296       return 0;
7297     // Note: We currently assume zero cost for an unconditional branch inside
7298     // a predicated block since it will become a fall-through, although we
7299     // may decide in the future to call TTI for all branches.
7300   }
7301   case Instruction::PHI: {
7302     auto *Phi = cast<PHINode>(I);
7303 
7304     // First-order recurrences are replaced by vector shuffles inside the loop.
7305     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7306     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7307       return TTI.getShuffleCost(
7308           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7309           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7310 
7311     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7312     // converted into select instructions. We require N - 1 selects per phi
7313     // node, where N is the number of incoming values.
7314     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7315       return (Phi->getNumIncomingValues() - 1) *
7316              TTI.getCmpSelInstrCost(
7317                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7318                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7319                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7320 
7321     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7322   }
7323   case Instruction::UDiv:
7324   case Instruction::SDiv:
7325   case Instruction::URem:
7326   case Instruction::SRem:
7327     // If we have a predicated instruction, it may not be executed for each
7328     // vector lane. Get the scalarization cost and scale this amount by the
7329     // probability of executing the predicated block. If the instruction is not
7330     // predicated, we fall through to the next case.
7331     if (VF.isVector() && isScalarWithPredication(I)) {
7332       InstructionCost Cost = 0;
7333 
7334       // These instructions have a non-void type, so account for the phi nodes
7335       // that we will create. This cost is likely to be zero. The phi node
7336       // cost, if any, should be scaled by the block probability because it
7337       // models a copy at the end of each predicated block.
7338       Cost += VF.getKnownMinValue() *
7339               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7340 
7341       // The cost of the non-predicated instruction.
7342       Cost += VF.getKnownMinValue() *
7343               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7344 
7345       // The cost of insertelement and extractelement instructions needed for
7346       // scalarization.
7347       Cost += getScalarizationOverhead(I, VF);
7348 
7349       // Scale the cost by the probability of executing the predicated blocks.
7350       // This assumes the predicated block for each vector lane is equally
7351       // likely.
7352       return Cost / getReciprocalPredBlockProb();
7353     }
7354     LLVM_FALLTHROUGH;
7355   case Instruction::Add:
7356   case Instruction::FAdd:
7357   case Instruction::Sub:
7358   case Instruction::FSub:
7359   case Instruction::Mul:
7360   case Instruction::FMul:
7361   case Instruction::FDiv:
7362   case Instruction::FRem:
7363   case Instruction::Shl:
7364   case Instruction::LShr:
7365   case Instruction::AShr:
7366   case Instruction::And:
7367   case Instruction::Or:
7368   case Instruction::Xor: {
7369     // Since we will replace the stride by 1 the multiplication should go away.
7370     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7371       return 0;
7372 
7373     // Detect reduction patterns
7374     InstructionCost RedCost;
7375     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7376             .isValid())
7377       return RedCost;
7378 
7379     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7381     Value *Op2 = I->getOperand(1);
7382     TargetTransformInfo::OperandValueProperties Op2VP;
7383     TargetTransformInfo::OperandValueKind Op2VK =
7384         TTI.getOperandInfo(Op2, Op2VP);
7385     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7386       Op2VK = TargetTransformInfo::OK_UniformValue;
7387 
7388     SmallVector<const Value *, 4> Operands(I->operand_values());
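    // Instructions that remain scalar after vectorization are replicated once
    // per lane, so scale the scalar cost by VF.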
7389     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7390     return N * TTI.getArithmeticInstrCost(
7391                    I->getOpcode(), VectorTy, CostKind,
7392                    TargetTransformInfo::OK_AnyValue,
7393                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7394   }
7395   case Instruction::FNeg: {
7396     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7397     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7398     return N * TTI.getArithmeticInstrCost(
7399                    I->getOpcode(), VectorTy, CostKind,
7400                    TargetTransformInfo::OK_AnyValue,
7401                    TargetTransformInfo::OK_AnyValue,
7402                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7403                    I->getOperand(0), I);
7404   }
7405   case Instruction::Select: {
7406     SelectInst *SI = cast<SelectInst>(I);
7407     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7408     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7409     Type *CondTy = SI->getCondition()->getType();
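    // A loop-invariant select condition stays scalar; otherwise it must be
    // widened to a vector of i1 to match the vectorized operands.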
7410     if (!ScalarCond)
7411       CondTy = VectorType::get(CondTy, VF);
7412     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7413                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7414   }
7415   case Instruction::ICmp:
7416   case Instruction::FCmp: {
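    // Compares are costed on their operand type; if the operand can be
    // truncated to a minimal bit width, cost the compare on the narrower type.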
7417     Type *ValTy = I->getOperand(0)->getType();
7418     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7419     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7420       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7421     VectorTy = ToVectorTy(ValTy, VF);
7422     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7423                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7424   }
7425   case Instruction::Store:
7426   case Instruction::Load: {
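    // The widening decision taken earlier determines the value type here:
    // CM_Scalarize means each lane keeps a scalar value.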
7427     ElementCount Width = VF;
7428     if (Width.isVector()) {
7429       InstWidening Decision = getWideningDecision(I, Width);
7430       assert(Decision != CM_Unknown &&
7431              "CM decision should be taken at this point");
7432       if (Decision == CM_Scalarize)
7433         Width = ElementCount::getFixed(1);
7434     }
7435     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7436     return getMemoryInstructionCost(I, VF);
7437   }
7438   case Instruction::ZExt:
7439   case Instruction::SExt:
7440   case Instruction::FPToUI:
7441   case Instruction::FPToSI:
7442   case Instruction::FPExt:
7443   case Instruction::PtrToInt:
7444   case Instruction::IntToPtr:
7445   case Instruction::SIToFP:
7446   case Instruction::UIToFP:
7447   case Instruction::Trunc:
7448   case Instruction::FPTrunc:
7449   case Instruction::BitCast: {
7450     // Computes the CastContextHint from a Load/Store instruction.
7451     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7452       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7453              "Expected a load or a store!");
7454 
7455       if (VF.isScalar() || !TheLoop->contains(I))
7456         return TTI::CastContextHint::Normal;
7457 
7458       switch (getWideningDecision(I, VF)) {
7459       case LoopVectorizationCostModel::CM_GatherScatter:
7460         return TTI::CastContextHint::GatherScatter;
7461       case LoopVectorizationCostModel::CM_Interleave:
7462         return TTI::CastContextHint::Interleave;
7463       case LoopVectorizationCostModel::CM_Scalarize:
7464       case LoopVectorizationCostModel::CM_Widen:
7465         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7466                                         : TTI::CastContextHint::Normal;
7467       case LoopVectorizationCostModel::CM_Widen_Reverse:
7468         return TTI::CastContextHint::Reversed;
7469       case LoopVectorizationCostModel::CM_Unknown:
7470         llvm_unreachable("Instr did not go through cost modelling?");
7471       }
7472 
7473       llvm_unreachable("Unhandled case!");
7474     };
7475 
7476     unsigned Opcode = I->getOpcode();
7477     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7478     // For Trunc, the context is the only user, which must be a StoreInst.
7479     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7480       if (I->hasOneUse())
7481         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7482           CCH = ComputeCCH(Store);
7483     }
7484     // For Z/Sext, the context is the operand, which must be a LoadInst.
7485     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7486              Opcode == Instruction::FPExt) {
7487       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7488         CCH = ComputeCCH(Load);
7489     }
7490 
7491     // We optimize the truncation of induction variables having constant
7492     // integer steps. The cost of these truncations is the same as the scalar
7493     // operation.
7494     if (isOptimizableIVTruncate(I, VF)) {
7495       auto *Trunc = cast<TruncInst>(I);
7496       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7497                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7498     }
7499 
7500     // Detect reduction patterns
7501     InstructionCost RedCost;
7502     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7503             .isValid())
7504       return RedCost;
7505 
7506     Type *SrcScalarTy = I->getOperand(0)->getType();
7507     Type *SrcVecTy =
7508         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7509     if (canTruncateToMinimalBitwidth(I, VF)) {
7510       // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
7512       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7513       //
7514       // Calculate the modified src and dest types.
7515       Type *MinVecTy = VectorTy;
7516       if (Opcode == Instruction::Trunc) {
7517         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7518         VectorTy =
7519             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7520       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7521         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7522         VectorTy =
7523             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7524       }
7525     }
7526 
7527     unsigned N;
7528     if (isScalarAfterVectorization(I, VF)) {
7529       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7530       N = VF.getKnownMinValue();
7531     } else
7532       N = 1;
7533     return N *
7534            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7535   }
7536   case Instruction::Call: {
7537     bool NeedToScalarize;
7538     CallInst *CI = cast<CallInst>(I);
7539     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7540     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7541       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7542       return std::min(CallCost, IntrinsicCost);
7543     }
7544     return CallCost;
7545   }
7546   case Instruction::ExtractValue:
7547     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7548   default:
7549     // The cost of executing VF copies of the scalar instruction. This opcode
7550     // is unknown. Assume that it is the same as 'mul'.
7551     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7552                                        Instruction::Mul, VectorTy, CostKind) +
7553            getScalarizationOverhead(I, VF);
7554   } // end of switch.
7555 }
7556 
7557 char LoopVectorize::ID = 0;
7558 
7559 static const char lv_name[] = "Loop Vectorization";
7560 
7561 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7562 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7563 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7564 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7565 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7566 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7567 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7568 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7569 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7570 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7571 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7572 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7573 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7574 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7575 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7576 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7577 
7578 namespace llvm {
7579 
7580 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7581 
7582 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7583                               bool VectorizeOnlyWhenForced) {
7584   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7585 }
7586 
7587 } // end namespace llvm
7588 
7589 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7590   // Check if the pointer operand of a load or store instruction is
7591   // consecutive.
7592   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7593     return Legal->isConsecutivePtr(Ptr);
7594   return false;
7595 }
7596 
7597 void LoopVectorizationCostModel::collectValuesToIgnore() {
7598   // Ignore ephemeral values.
7599   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7600 
7601   // Ignore type-promoting instructions we identified during reduction
7602   // detection.
7603   for (auto &Reduction : Legal->getReductionVars()) {
7604     RecurrenceDescriptor &RedDes = Reduction.second;
7605     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7606     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7607   }
7608   // Ignore type-casting instructions we identified during induction
7609   // detection.
7610   for (auto &Induction : Legal->getInductionVars()) {
7611     InductionDescriptor &IndDes = Induction.second;
7612     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7613     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7614   }
7615 }
7616 
7617 void LoopVectorizationCostModel::collectInLoopReductions() {
7618   for (auto &Reduction : Legal->getReductionVars()) {
7619     PHINode *Phi = Reduction.first;
7620     RecurrenceDescriptor &RdxDesc = Reduction.second;
7621 
7622     // We don't collect reductions that are type promoted (yet).
7623     if (RdxDesc.getRecurrenceType() != Phi->getType())
7624       continue;
7625 
7626     // If the target would prefer this reduction to happen "in-loop", then we
7627     // want to record it as such.
7628     unsigned Opcode = RdxDesc.getOpcode();
7629     if (!PreferInLoopReductions &&
7630         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7631                                    TargetTransformInfo::ReductionFlags()))
7632       continue;
7633 
7634     // Check that we can correctly put the reductions into the loop, by
7635     // finding the chain of operations that leads from the phi to the loop
7636     // exit value.
7637     SmallVector<Instruction *, 4> ReductionOperations =
7638         RdxDesc.getReductionOpChain(Phi, TheLoop);
7639     bool InLoop = !ReductionOperations.empty();
7640     if (InLoop) {
7641       InLoopReductionChains[Phi] = ReductionOperations;
7642       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7643       Instruction *LastChain = Phi;
7644       for (auto *I : ReductionOperations) {
7645         InLoopReductionImmediateChains[I] = LastChain;
7646         LastChain = I;
7647       }
7648     }
7649     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7650                       << " reduction for phi: " << *Phi << "\n");
7651   }
7652 }
7653 
7654 // TODO: we could return a pair of values that specify the max VF and
7655 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7657 // doesn't have a cost model that can choose which plan to execute if
7658 // more than one is generated.
7659 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7660                                  LoopVectorizationCostModel &CM) {
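  // Derive the VF from the widest vector register width and the widest scalar
  // type used in the loop (both in bits).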
7661   unsigned WidestType;
7662   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7663   return WidestVectorRegBits / WidestType;
7664 }
7665 
7666 VectorizationFactor
7667 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7668   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7669   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
7672   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7673   // the vectorization pipeline.
7674   if (!OrigLoop->isInnermost()) {
7675     // If the user doesn't provide a vectorization factor, determine a
7676     // reasonable one.
7677     if (UserVF.isZero()) {
7678       VF = ElementCount::getFixed(
7679           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
7680       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7681 
7682       // Make sure we have a VF > 1 for stress testing.
7683       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7684         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7685                           << "overriding computed VF.\n");
7686         VF = ElementCount::getFixed(4);
7687       }
7688     }
7689     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7690     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7691            "VF needs to be a power of two");
7692     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7693                       << "VF " << VF << " to build VPlans.\n");
7694     buildVPlans(VF, VF);
7695 
7696     // For VPlan build stress testing, we bail out after VPlan construction.
7697     if (VPlanBuildStressTest)
7698       return VectorizationFactor::Disabled();
7699 
7700     return {VF, 0 /*Cost*/};
7701   }
7702 
7703   LLVM_DEBUG(
7704       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7705                 "VPlan-native path.\n");
7706   return VectorizationFactor::Disabled();
7707 }
7708 
7709 Optional<VectorizationFactor>
7710 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7711   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7712   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
7714     return None;
7715 
  // Invalidate interleave groups if all loop blocks will be predicated.
7717   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7718       !useMaskedInterleavedAccesses(*TTI)) {
7719     LLVM_DEBUG(
7720         dbgs()
7721         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7722            "which requires masked-interleaved support.\n");
7723     if (CM.InterleaveInfo.invalidateGroups())
7724       // Invalidating interleave groups also requires invalidating all decisions
7725       // based on them, which includes widening decisions and uniform and scalar
7726       // values.
7727       CM.invalidateCostModelingDecisions();
7728   }
7729 
7730   ElementCount MaxVF = MaybeMaxVF.getValue();
7731   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7732 
7733   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7734   if (!UserVF.isZero() &&
7735       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
7737     // VFs here, this should be reverted to only use legal UserVFs once the
7738     // loop below supports scalable VFs.
7739     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7740     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7741                       << " VF " << VF << ".\n");
7742     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7743            "VF needs to be a power of two");
7744     // Collect the instructions (and their associated costs) that will be more
7745     // profitable to scalarize.
7746     CM.selectUserVectorizationFactor(VF);
7747     CM.collectInLoopReductions();
7748     buildVPlansWithVPRecipes(VF, VF);
7749     LLVM_DEBUG(printPlans(dbgs()));
7750     return {{VF, 0}};
7751   }
7752 
7753   assert(!MaxVF.isScalable() &&
7754          "Scalable vectors not yet supported beyond this point");
7755 
7756   for (ElementCount VF = ElementCount::getFixed(1);
7757        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7758     // Collect Uniform and Scalar instructions after vectorization with VF.
7759     CM.collectUniformsAndScalars(VF);
7760 
7761     // Collect the instructions (and their associated costs) that will be more
7762     // profitable to scalarize.
7763     if (VF.isVector())
7764       CM.collectInstsToScalarize(VF);
7765   }
7766 
7767   CM.collectInLoopReductions();
7768 
7769   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7770   LLVM_DEBUG(printPlans(dbgs()));
7771   if (MaxVF.isScalar())
7772     return VectorizationFactor::Disabled();
7773 
7774   // Select the optimal vectorization factor.
7775   return CM.selectVectorizationFactor(MaxVF);
7776 }
7777 
7778 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7779   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7780                     << '\n');
7781   BestVF = VF;
7782   BestUF = UF;
7783 
7784   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7785     return !Plan->hasVF(VF);
7786   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7788 }
7789 
7790 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7791                                            DominatorTree *DT) {
7792   // Perform the actual loop transformation.
7793 
7794   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7795   VPCallbackILV CallbackILV(ILV);
7796 
7797   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7798   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7799 
7800   VPTransformState State{*BestVF,     BestUF,
7801                          LI,          DT,
7802                          ILV.Builder, ILV.VectorLoopValueMap,
7803                          &ILV,        VPlans.front().get(),
7804                          CallbackILV};
7805   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7806   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7807   State.CanonicalIV = ILV.Induction;
7808 
7809   ILV.printDebugTracesAtStart();
7810 
7811   //===------------------------------------------------===//
7812   //
  // Notice: any optimization or new instruction that goes
7814   // into the code below should also be implemented in
7815   // the cost-model.
7816   //
7817   //===------------------------------------------------===//
7818 
7819   // 2. Copy and widen instructions from the old loop into the new loop.
7820   VPlans.front()->execute(&State);
7821 
7822   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7823   //    predication, updating analyses.
7824   ILV.fixVectorizedLoop(State);
7825 
7826   ILV.printDebugTracesAtEnd();
7827 }
7828 
7829 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7830     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7831 
7832   // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
7835   SmallVector<BasicBlock*> ExitingBlocks;
7836   OrigLoop->getExitingBlocks(ExitingBlocks);
7837   for (auto *BB : ExitingBlocks) {
7838     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7839     if (!Cmp || !Cmp->hasOneUse())
7840       continue;
7841 
7842     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7843     if (!DeadInstructions.insert(Cmp).second)
7844       continue;
7845 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
7847     // TODO: can recurse through operands in general
7848     for (Value *Op : Cmp->operands()) {
7849       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
7851     }
7852   }
7853 
7854   // We create new "steps" for induction variable updates to which the original
7855   // induction variables map. An original update instruction will be dead if
7856   // all its users except the induction variable are dead.
7857   auto *Latch = OrigLoop->getLoopLatch();
7858   for (auto &Induction : Legal->getInductionVars()) {
7859     PHINode *Ind = Induction.first;
7860     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7861 
7862     // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7864     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7865       continue;
7866 
7867     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7868           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7869         }))
7870       DeadInstructions.insert(IndUpdate);
7871 
7872     // We record as "Dead" also the type-casting instructions we had identified
7873     // during induction analysis. We don't need any handling for them in the
7874     // vectorized loop because we have proven that, under a proper runtime
7875     // test guarding the vectorized loop, the value of the phi, and the casted
7876     // value of the phi, are the same. The last instruction in this casting chain
7877     // will get its scalar/vector/widened def from the scalar/vector/widened def
7878     // of the respective phi node. Any other casts in the induction def-use chain
7879     // have no other uses outside the phi update chain, and will be ignored.
7880     InductionDescriptor &IndDes = Induction.second;
7881     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7882     DeadInstructions.insert(Casts.begin(), Casts.end());
7883   }
7884 }
7885 
7886 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7887 
7888 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7889 
7890 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7891                                         Instruction::BinaryOps BinOp) {
7892   // When unrolling and the VF is 1, we only need to add a simple scalar.
7893   Type *Ty = Val->getType();
7894   assert(!Ty->isVectorTy() && "Val must be a scalar");
7895 
7896   if (Ty->isFloatingPointTy()) {
7897     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7898 
7899     // Floating point operations had to be 'fast' to enable the unrolling.
7900     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7901     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7902   }
7903   Constant *C = ConstantInt::get(Ty, StartIdx);
7904   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7905 }
7906 
7907 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7908   SmallVector<Metadata *, 4> MDs;
7909   // Reserve first location for self reference to the LoopID metadata node.
7910   MDs.push_back(nullptr);
7911   bool IsUnrollMetadata = false;
7912   MDNode *LoopID = L->getLoopID();
7913   if (LoopID) {
7914     // First find existing loop unrolling disable metadata.
7915     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7916       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7917       if (MD) {
7918         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7919         IsUnrollMetadata =
7920             S && S->getString().startswith("llvm.loop.unroll.disable");
7921       }
7922       MDs.push_back(LoopID->getOperand(i));
7923     }
7924   }
7925 
7926   if (!IsUnrollMetadata) {
7927     // Add runtime unroll disable metadata.
7928     LLVMContext &Context = L->getHeader()->getContext();
7929     SmallVector<Metadata *, 1> DisableOperands;
7930     DisableOperands.push_back(
7931         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7932     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7933     MDs.push_back(DisableNode);
7934     MDNode *NewLoopID = MDNode::get(Context, MDs);
7935     // Set operand 0 to refer to the loop id itself.
7936     NewLoopID->replaceOperandWith(0, NewLoopID);
7937     L->setLoopID(NewLoopID);
7938   }
7939 }
7940 
7941 //===--------------------------------------------------------------------===//
7942 // EpilogueVectorizerMainLoop
7943 //===--------------------------------------------------------------------===//
7944 
7945 /// This function is partially responsible for generating the control flow
7946 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7947 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7948   MDNode *OrigLoopID = OrigLoop->getLoopID();
7949   Loop *Lp = createVectorLoopSkeleton("");
7950 
7951   // Generate the code to check the minimum iteration count of the vector
7952   // epilogue (see below).
7953   EPI.EpilogueIterationCountCheck =
7954       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
7955   EPI.EpilogueIterationCountCheck->setName("iter.check");
7956 
7957   // Generate the code to check any assumptions that we've made for SCEV
7958   // expressions.
7959   BasicBlock *SavedPreHeader = LoopVectorPreHeader;
7960   emitSCEVChecks(Lp, LoopScalarPreHeader);
7961 
  // If a safety check was generated, save it.
7963   if (SavedPreHeader != LoopVectorPreHeader)
7964     EPI.SCEVSafetyCheck = SavedPreHeader;
7965 
7966   // Generate the code that checks at runtime if arrays overlap. We put the
7967   // checks into a separate block to make the more common case of few elements
7968   // faster.
7969   SavedPreHeader = LoopVectorPreHeader;
7970   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
7971 
  // If a safety check was generated, save/overwrite it.
7973   if (SavedPreHeader != LoopVectorPreHeader)
7974     EPI.MemSafetyCheck = SavedPreHeader;
7975 
7976   // Generate the iteration count check for the main loop, *after* the check
7977   // for the epilogue loop, so that the path-length is shorter for the case
7978   // that goes directly through the vector epilogue. The longer-path length for
7979   // the main loop is compensated for, by the gain from vectorizing the larger
7980   // trip count. Note: the branch will get updated later on when we vectorize
7981   // the epilogue.
7982   EPI.MainLoopIterationCountCheck =
7983       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
7984 
7985   // Generate the induction variable.
7986   OldInduction = Legal->getPrimaryInduction();
7987   Type *IdxTy = Legal->getWidestInductionType();
7988   Value *StartIdx = ConstantInt::get(IdxTy, 0);
7989   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
7990   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
7991   EPI.VectorTripCount = CountRoundDown;
7992   Induction =
7993       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
7994                               getDebugLocFromInstOrOperands(OldInduction));
7995 
  // Skip creating induction resume values here; they will be created in the
  // second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.
8000 
8001   return completeLoopSkeleton(Lp, OrigLoopID);
8002 }
8003 
8004 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8005   LLVM_DEBUG({
8006     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8007            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8008            << ", Main Loop UF:" << EPI.MainLoopUF
8009            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8010            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8011   });
8012 }
8013 
8014 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8015   DEBUG_WITH_TYPE(VerboseDebug, {
8016     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8017   });
8018 }
8019 
8020 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8021     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8022   assert(L && "Expected valid Loop.");
8023   assert(Bypass && "Expected valid bypass basic block.");
8024   unsigned VFactor =
8025       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8026   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8027   Value *Count = getOrCreateTripCount(L);
8028   // Reuse existing vector loop preheader for TC checks.
8029   // Note that new preheader block is generated for vector loop.
8030   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8031   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8032 
8033   // Generate code to check if the loop's trip count is less than VF * UF of the
8034   // main vector loop.
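  // If a scalar epilogue is required, at least one iteration must stay
  // scalar, so take the bypass even when the trip count equals VF * UF
  // (hence ULE rather than ULT).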
8035   auto P =
8036       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8037 
8038   Value *CheckMinIters = Builder.CreateICmp(
8039       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8040       "min.iters.check");
8041 
8042   if (!ForEpilogue)
8043     TCCheckBlock->setName("vector.main.loop.iter.check");
8044 
8045   // Create new preheader for vector loop.
8046   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8047                                    DT, LI, nullptr, "vector.ph");
8048 
8049   if (ForEpilogue) {
8050     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8051                                  DT->getNode(Bypass)->getIDom()) &&
8052            "TC check is expected to dominate Bypass");
8053 
8054     // Update dominator for Bypass & LoopExit.
8055     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8056     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8057 
8058     LoopBypassBlocks.push_back(TCCheckBlock);
8059 
8060     // Save the trip count so we don't have to regenerate it in the
8061     // vec.epilog.iter.check. This is safe to do because the trip count
8062     // generated here dominates the vector epilog iter check.
8063     EPI.TripCount = Count;
8064   }
8065 
8066   ReplaceInstWithInst(
8067       TCCheckBlock->getTerminator(),
8068       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8069 
8070   return TCCheckBlock;
8071 }
8072 
8073 //===--------------------------------------------------------------------===//
8074 // EpilogueVectorizerEpilogueLoop
8075 //===--------------------------------------------------------------------===//
8076 
8077 /// This function is partially responsible for generating the control flow
8078 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8079 BasicBlock *
8080 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8081   MDNode *OrigLoopID = OrigLoop->getLoopID();
8082   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8083 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8086   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8087   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8088   LoopVectorPreHeader =
8089       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8090                  LI, nullptr, "vec.epilog.ph");
8091   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8092                                           VecEpilogueIterationCountCheck);
8093 
8094   // Adjust the control flow taking the state info from the main loop
8095   // vectorization into account.
8096   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8097          "expected this to be saved from the previous pass.");
8098   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8099       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8100 
8101   DT->changeImmediateDominator(LoopVectorPreHeader,
8102                                EPI.MainLoopIterationCountCheck);
8103 
8104   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8105       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8106 
8107   if (EPI.SCEVSafetyCheck)
8108     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8109         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8110   if (EPI.MemSafetyCheck)
8111     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8112         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8113 
8114   DT->changeImmediateDominator(
8115       VecEpilogueIterationCountCheck,
8116       VecEpilogueIterationCountCheck->getSinglePredecessor());
8117 
8118   DT->changeImmediateDominator(LoopScalarPreHeader,
8119                                EPI.EpilogueIterationCountCheck);
8120   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8121 
8122   // Keep track of bypass blocks, as they feed start values to the induction
8123   // phis in the scalar loop preheader.
8124   if (EPI.SCEVSafetyCheck)
8125     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8126   if (EPI.MemSafetyCheck)
8127     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8128   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8129 
8130   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8132   Type *IdxTy = Legal->getWidestInductionType();
8133   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8134                                          LoopVectorPreHeader->getFirstNonPHI());
8135   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8136   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8137                            EPI.MainLoopIterationCountCheck);
8138 
8139   // Generate the induction variable.
8140   OldInduction = Legal->getPrimaryInduction();
8141   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8142   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8143   Value *StartIdx = EPResumeVal;
8144   Induction =
8145       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8146                               getDebugLocFromInstOrOperands(OldInduction));
8147 
8148   // Generate induction resume values. These variables save the new starting
8149   // indexes for the scalar loop. They are used to test if there are any tail
8150   // iterations left once the vector loop has completed.
8151   // Note that when the vectorized epilogue is skipped due to iteration count
8152   // check, then the resume value for the induction variable comes from
8153   // the trip count of the main vector loop, hence passing the AdditionalBypass
8154   // argument.
8155   createInductionResumeValues(Lp, CountRoundDown,
8156                               {VecEpilogueIterationCountCheck,
8157                                EPI.VectorTripCount} /* AdditionalBypass */);
8158 
8159   AddRuntimeUnrollDisableMetaData(Lp);
8160   return completeLoopSkeleton(Lp, OrigLoopID);
8161 }
8162 
8163 BasicBlock *
8164 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8165     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8166 
8167   assert(EPI.TripCount &&
8168          "Expected trip count to have been safed in the first pass.");
8169   assert(
8170       (!isa<Instruction>(EPI.TripCount) ||
8171        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8172       "saved trip count does not dominate insertion point.");
8173   Value *TC = EPI.TripCount;
8174   IRBuilder<> Builder(Insert->getTerminator());
8175   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8176 
8177   // Generate code to check if the loop's trip count is less than VF * UF of the
8178   // vector epilogue loop.
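  // If a scalar epilogue is required, at least one iteration must stay
  // scalar, hence ULE rather than ULT.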
8179   auto P =
8180       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8181 
8182   Value *CheckMinIters = Builder.CreateICmp(
8183       P, Count,
8184       ConstantInt::get(Count->getType(),
8185                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8186       "min.epilog.iters.check");
8187 
8188   ReplaceInstWithInst(
8189       Insert->getTerminator(),
8190       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8191 
8192   LoopBypassBlocks.push_back(Insert);
8193   return Insert;
8194 }
8195 
8196 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8197   LLVM_DEBUG({
8198     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8199            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8200            << ", Main Loop UF:" << EPI.MainLoopUF
8201            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8202            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8203   });
8204 }
8205 
8206 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8207   DEBUG_WITH_TYPE(VerboseDebug, {
8208     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8209   });
8210 }
8211 
8212 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8213     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8214   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8215   bool PredicateAtRangeStart = Predicate(Range.Start);
8216 
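  // Shrink Range.End to the first power-of-two VF whose decision differs from
  // the one taken at Range.Start, so every VF left in the range shares the
  // decision returned below.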
8217   for (ElementCount TmpVF = Range.Start * 2;
8218        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8219     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8220       Range.End = TmpVF;
8221       break;
8222     }
8223 
8224   return PredicateAtRangeStart;
8225 }
8226 
8227 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8228 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8229 /// of VF's starting at a given VF and extending it as much as possible. Each
8230 /// vectorization decision can potentially shorten this sub-range during
8231 /// buildVPlan().
8232 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8233                                            ElementCount MaxVF) {
8234   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8235   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8236     VFRange SubRange = {VF, MaxVFPlusOne};
8237     VPlans.push_back(buildVPlan(SubRange));
8238     VF = SubRange.End;
8239   }
8240 }
8241 
8242 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8243                                          VPlanPtr &Plan) {
8244   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8245 
8246   // Look for cached value.
8247   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8248   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8249   if (ECEntryIt != EdgeMaskCache.end())
8250     return ECEntryIt->second;
8251 
8252   VPValue *SrcMask = createBlockInMask(Src, Plan);
8253 
8254   // The terminator has to be a branch inst!
8255   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8256   assert(BI && "Unexpected terminator found");
8257 
8258   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8259     return EdgeMaskCache[Edge] = SrcMask;
8260 
8261   // If source is an exiting block, we know the exit edge is dynamically dead
8262   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8263   // adding uses of an otherwise potentially dead instruction.
8264   if (OrigLoop->isLoopExiting(Src))
8265     return EdgeMaskCache[Edge] = SrcMask;
8266 
8267   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8268   assert(EdgeMask && "No Edge Mask found for condition");
8269 
8270   if (BI->getSuccessor(0) != Dst)
8271     EdgeMask = Builder.createNot(EdgeMask);
8272 
8273   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8274     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8275     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8276     // The select version does not introduce new UB if SrcMask is false and
8277     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8278     VPValue *False = Plan->getOrAddVPValue(
8279         ConstantInt::getFalse(BI->getCondition()->getType()));
8280     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8281   }
8282 
8283   return EdgeMaskCache[Edge] = EdgeMask;
8284 }
8285 
8286 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8287   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8288 
8289   // Look for cached value.
8290   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8291   if (BCEntryIt != BlockMaskCache.end())
8292     return BCEntryIt->second;
8293 
8294   // All-one mask is modelled as no-mask following the convention for masked
8295   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8296   VPValue *BlockMask = nullptr;
8297 
8298   if (OrigLoop->getHeader() == BB) {
8299     if (!CM.blockNeedsPredication(BB))
8300       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8301 
8302     // Create the block in mask as the first non-phi instruction in the block.
8303     VPBuilder::InsertPointGuard Guard(Builder);
8304     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8305     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8306 
8307     // Introduce the early-exit compare IV <= BTC to form header block mask.
8308     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8309     // Start by constructing the desired canonical IV.
8310     VPValue *IV = nullptr;
8311     if (Legal->getPrimaryInduction())
8312       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8313     else {
8314       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8315       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8316       IV = IVRecipe->getVPValue();
8317     }
8318     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8319     bool TailFolded = !CM.isScalarEpilogueAllowed();
8320 
8321     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8322       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8323       // as a second argument, we only pass the IV here and extract the
8324       // tripcount from the transform state where codegen of the VP instructions
      // happens.
8326       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8327     } else {
8328       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8329     }
8330     return BlockMaskCache[BB] = BlockMask;
8331   }
8332 
8333   // This is the block mask. We OR all incoming edges.
8334   for (auto *Predecessor : predecessors(BB)) {
8335     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8336     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8337       return BlockMaskCache[BB] = EdgeMask;
8338 
8339     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8340       BlockMask = EdgeMask;
8341       continue;
8342     }
8343 
8344     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8345   }
8346 
8347   return BlockMaskCache[BB] = BlockMask;
8348 }
8349 
8350 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
8351                                                 VPlanPtr &Plan) {
8352   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8353          "Must be called with either a load or store");
8354 
8355   auto willWiden = [&](ElementCount VF) -> bool {
8356     if (VF.isScalar())
8357       return false;
8358     LoopVectorizationCostModel::InstWidening Decision =
8359         CM.getWideningDecision(I, VF);
8360     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8361            "CM decision should be taken at this point.");
8362     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8363       return true;
8364     if (CM.isScalarAfterVectorization(I, VF) ||
8365         CM.isProfitableToScalarize(I, VF))
8366       return false;
8367     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8368   };
8369 
8370   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8371     return nullptr;
8372 
8373   VPValue *Mask = nullptr;
8374   if (Legal->isMaskRequired(I))
8375     Mask = createBlockInMask(I->getParent(), Plan);
8376 
8377   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
8378   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8379     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
8380 
8381   StoreInst *Store = cast<StoreInst>(I);
8382   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
8383   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
8384 }
8385 
8386 VPWidenIntOrFpInductionRecipe *
8387 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const {
8388   // Check if this is an integer or fp induction. If so, build the recipe that
8389   // produces its scalar and vector values.
8390   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8391   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8392       II.getKind() == InductionDescriptor::IK_FpInduction) {
8393     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8394     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8395     return new VPWidenIntOrFpInductionRecipe(
8396         Phi, Start, Casts.empty() ? nullptr : Casts.front());
8397   }
8398 
8399   return nullptr;
8400 }
8401 
8402 VPWidenIntOrFpInductionRecipe *
8403 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range,
8404                                                 VPlan &Plan) const {
8405   // Optimize the special case where the source is a constant integer
8406   // induction variable. Notice that we can only optimize the 'trunc' case
8407   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8408   // (c) other casts depend on pointer size.
8409 
8410   // Determine whether \p K is a truncation based on an induction variable that
8411   // can be optimized.
8412   auto isOptimizableIVTruncate =
8413       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8414     return [=](ElementCount VF) -> bool {
8415       return CM.isOptimizableIVTruncate(K, VF);
8416     };
8417   };
8418 
8419   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8420           isOptimizableIVTruncate(I), Range)) {
8421 
8422     InductionDescriptor II =
8423         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8424     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8425     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8426                                              Start, nullptr, I);
8427   }
8428   return nullptr;
8429 }
8430 
8431 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8432   // We know that all PHIs in non-header blocks are converted into selects, so
8433   // we don't have to worry about the insertion order and we can just use the
8434   // builder. At this point we generate the predication tree. There may be
8435   // duplications since this is a simple recursive scan, but future
8436   // optimizations will clean it up.
8437 
8438   SmallVector<VPValue *, 2> Operands;
8439   unsigned NumIncoming = Phi->getNumIncomingValues();
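  // Collect each incoming value followed by the mask of its incoming edge.
  // The mask may only be omitted for a single-predecessor phi, where it is
  // known to be all-ones.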
8440   for (unsigned In = 0; In < NumIncoming; In++) {
8441     VPValue *EdgeMask =
8442       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8443     assert((EdgeMask || NumIncoming == 1) &&
8444            "Multiple predecessors with one having a full mask");
8445     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8446     if (EdgeMask)
8447       Operands.push_back(EdgeMask);
8448   }
8449   return new VPBlendRecipe(Phi, Operands);
8450 }
8451 
8452 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8453                                                    VPlan &Plan) const {
8454 
8455   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8456       [this, CI](ElementCount VF) {
8457         return CM.isScalarWithPredication(CI, VF);
8458       },
8459       Range);
8460 
8461   if (IsPredicated)
8462     return nullptr;
8463 
8464   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
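  // Do not widen these intrinsics into vector calls; returning nullptr lets
  // them be handled by scalar replication instead.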
8465   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8466              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8467              ID == Intrinsic::pseudoprobe ||
8468              ID == Intrinsic::experimental_noalias_scope_decl))
8469     return nullptr;
8470 
8471   auto willWiden = [&](ElementCount VF) -> bool {
8472     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e. whether it is beneficial
    // to perform the intrinsic call rather than the lib call.
8477     bool NeedToScalarize = false;
8478     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8479     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8480     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8481     assert(IntrinsicCost.isValid() && CallCost.isValid() &&
8482            "Cannot have invalid costs while widening");
8483     return UseVectorIntrinsic || !NeedToScalarize;
8484   };
8485 
8486   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8487     return nullptr;
8488 
8489   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
8490 }
8491 
8492 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8493   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8494          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8495   // Instruction should be widened, unless it is scalar after vectorization,
8496   // scalarization is profitable or it is predicated.
8497   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8498     return CM.isScalarAfterVectorization(I, VF) ||
8499            CM.isProfitableToScalarize(I, VF) ||
8500            CM.isScalarWithPredication(I, VF);
8501   };
8502   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8503                                                              Range);
8504 }
8505 
8506 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
8507   auto IsVectorizableOpcode = [](unsigned Opcode) {
8508     switch (Opcode) {
8509     case Instruction::Add:
8510     case Instruction::And:
8511     case Instruction::AShr:
8512     case Instruction::BitCast:
8513     case Instruction::FAdd:
8514     case Instruction::FCmp:
8515     case Instruction::FDiv:
8516     case Instruction::FMul:
8517     case Instruction::FNeg:
8518     case Instruction::FPExt:
8519     case Instruction::FPToSI:
8520     case Instruction::FPToUI:
8521     case Instruction::FPTrunc:
8522     case Instruction::FRem:
8523     case Instruction::FSub:
8524     case Instruction::ICmp:
8525     case Instruction::IntToPtr:
8526     case Instruction::LShr:
8527     case Instruction::Mul:
8528     case Instruction::Or:
8529     case Instruction::PtrToInt:
8530     case Instruction::SDiv:
8531     case Instruction::Select:
8532     case Instruction::SExt:
8533     case Instruction::Shl:
8534     case Instruction::SIToFP:
8535     case Instruction::SRem:
8536     case Instruction::Sub:
8537     case Instruction::Trunc:
8538     case Instruction::UDiv:
8539     case Instruction::UIToFP:
8540     case Instruction::URem:
8541     case Instruction::Xor:
8542     case Instruction::ZExt:
8543       return true;
8544     }
8545     return false;
8546   };
8547 
8548   if (!IsVectorizableOpcode(I->getOpcode()))
8549     return nullptr;
8550 
8551   // Success: widen this instruction.
8552   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
8553 }
8554 
8555 VPBasicBlock *VPRecipeBuilder::handleReplication(
8556     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8557     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
8558     VPlanPtr &Plan) {
8559   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8560       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8561       Range);
8562 
8563   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8564       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8565       Range);
8566 
8567   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8568                                        IsUniform, IsPredicated);
8569   setRecipe(I, Recipe);
8570   Plan->addVPValue(I, Recipe);
8571 
8572   // Find if I uses a predicated instruction. If so, it will use its scalar
8573   // value. Avoid hoisting the insert-element which packs the scalar value into
8574   // a vector value, as that happens iff all users use the vector value.
8575   for (auto &Op : I->operands())
8576     if (auto *PredInst = dyn_cast<Instruction>(Op))
8577       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8578         PredInst2Recipe[PredInst]->setAlsoPack(false);
8579 
  // Finalize the recipe for Instr, handling the non-predicated case first.
8581   if (!IsPredicated) {
8582     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8583     VPBB->appendRecipe(Recipe);
8584     return VPBB;
8585   }
8586   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8587   assert(VPBB->getSuccessors().empty() &&
8588          "VPBB has successors when handling predicated replication.");
8589   // Record predicated instructions for above packing optimizations.
8590   PredInst2Recipe[I] = Recipe;
8591   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8592   VPBlockUtils::insertBlockAfter(Region, VPBB);
8593   auto *RegSucc = new VPBasicBlock();
8594   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8595   return RegSucc;
8596 }
8597 
8598 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8599                                                       VPRecipeBase *PredRecipe,
8600                                                       VPlanPtr &Plan) {
8601   // Instructions marked for predication are replicated and placed under an
8602   // if-then construct to prevent side-effects.
8603 
8604   // Generate recipes to compute the block mask for this region.
8605   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8606 
8607   // Build the triangular if-then region.
8608   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8609   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8610   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8611   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8612   auto *PHIRecipe = Instr->getType()->isVoidTy()
8613                         ? nullptr
8614                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8615   if (PHIRecipe) {
8616     Plan->removeVPValueFor(Instr);
8617     Plan->addVPValue(Instr, PHIRecipe);
8618   }
8619   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8620   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8621   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8622 
8623   // Note: first set Entry as region entry and then connect successors starting
8624   // from it in order, to propagate the "parent" of each VPBasicBlock.
8625   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8626   VPBlockUtils::connectBlocks(Pred, Exit);
8627 
8628   return Region;
8629 }
8630 
8631 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8632                                                       VFRange &Range,
8633                                                       VPlanPtr &Plan) {
8634   // First, check for specific widening recipes that deal with calls, memory
8635   // operations, inductions and Phi nodes.
8636   if (auto *CI = dyn_cast<CallInst>(Instr))
8637     return tryToWidenCall(CI, Range, *Plan);
8638 
8639   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8640     return tryToWidenMemory(Instr, Range, Plan);
8641 
8642   VPRecipeBase *Recipe;
8643   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8644     if (Phi->getParent() != OrigLoop->getHeader())
8645       return tryToBlend(Phi, Plan);
8646     if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8647       return Recipe;
8648 
8649     if (Legal->isReductionVariable(Phi)) {
8650       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8651       VPValue *StartV =
8652           Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8653       return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8654     }
8655 
8656     return new VPWidenPHIRecipe(Phi);
8657   }
8658 
8659   if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8660                                     cast<TruncInst>(Instr), Range, *Plan)))
8661     return Recipe;
8662 
8663   if (!shouldWiden(Instr, Range))
8664     return nullptr;
8665 
8666   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8667     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8668                                 OrigLoop);
8669 
8670   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8671     bool InvariantCond =
8672         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8673     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8674                                    InvariantCond);
8675   }
8676 
8677   return tryToWiden(Instr, *Plan);
8678 }
8679 
8680 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8681                                                         ElementCount MaxVF) {
8682   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8683 
8684   // Collect instructions from the original loop that will become trivially dead
8685   // in the vectorized loop. We don't need to vectorize these instructions. For
8686   // example, original induction update instructions can become dead because we
8687   // separately emit induction "steps" when generating code for the new loop.
8688   // Similarly, we create a new latch condition when setting up the structure
8689   // of the new loop, so the old one can become dead.
8690   SmallPtrSet<Instruction *, 4> DeadInstructions;
8691   collectTriviallyDeadInstructions(DeadInstructions);
8692 
8693   // Add assume instructions we need to drop to DeadInstructions, to prevent
8694   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
8696   // control flow is preserved, we should keep them.
8697   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8698   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8699 
8700   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8701   // Dead instructions do not need sinking. Remove them from SinkAfter.
8702   for (Instruction *I : DeadInstructions)
8703     SinkAfter.erase(I);
8704 
8705   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
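  // Build one VPlan per VF sub-range. Decisions made while building a plan may
  // clamp SubRange.End, and the next plan starts from that clamped VF.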
8706   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8707     VFRange SubRange = {VF, MaxVFPlusOne};
8708     VPlans.push_back(
8709         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8710     VF = SubRange.End;
8711   }
8712 }
8713 
8714 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8715     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8716     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8717 
8718   // Hold a mapping from predicated instructions to their recipes, in order to
8719   // fix their AlsoPack behavior if a user is determined to replicate and use a
8720   // scalar instead of vector value.
8721   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8722 
8723   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8724 
8725   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8726 
8727   // ---------------------------------------------------------------------------
8728   // Pre-construction: record ingredients whose recipes we'll need to further
8729   // process after constructing the initial VPlan.
8730   // ---------------------------------------------------------------------------
8731 
8732   // Mark instructions we'll need to sink later and their targets as
8733   // ingredients whose recipe we'll need to record.
8734   for (auto &Entry : SinkAfter) {
8735     RecipeBuilder.recordRecipeOf(Entry.first);
8736     RecipeBuilder.recordRecipeOf(Entry.second);
8737   }
8738   for (auto &Reduction : CM.getInLoopReductionChains()) {
8739     PHINode *Phi = Reduction.first;
8740     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8741     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8742 
8743     RecipeBuilder.recordRecipeOf(Phi);
8744     for (auto &R : ReductionOperations) {
8745       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
8747       // need to record the ICmp recipe, so it can be removed later.
8748       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8749         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8750     }
8751   }
8752 
8753   // For each interleave group which is relevant for this (possibly trimmed)
8754   // Range, add it to the set of groups to be later applied to the VPlan and add
8755   // placeholders for its members' Recipes which we'll be replacing with a
8756   // single VPInterleaveRecipe.
8757   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8758     auto applyIG = [IG, this](ElementCount VF) -> bool {
8759       return (VF.isVector() && // Query is illegal for VF == 1
8760               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8761                   LoopVectorizationCostModel::CM_Interleave);
8762     };
8763     if (!getDecisionAndClampRange(applyIG, Range))
8764       continue;
8765     InterleaveGroups.insert(IG);
8766     for (unsigned i = 0; i < IG->getFactor(); i++)
8767       if (Instruction *Member = IG->getMember(i))
8768         RecipeBuilder.recordRecipeOf(Member);
8769   };
8770 
8771   // ---------------------------------------------------------------------------
8772   // Build initial VPlan: Scan the body of the loop in a topological order to
8773   // visit each basic block after having visited its predecessor basic blocks.
8774   // ---------------------------------------------------------------------------
8775 
8776   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8777   auto Plan = std::make_unique<VPlan>();
8778   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8779   Plan->setEntry(VPBB);
8780 
8781   // Scan the body of the loop in a topological order to visit each basic block
8782   // after having visited its predecessor basic blocks.
8783   LoopBlocksDFS DFS(OrigLoop);
8784   DFS.perform(LI);
8785 
8786   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients, which will fill a new VPBasicBlock.
8789     unsigned VPBBsForBB = 0;
8790     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8791     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8792     VPBB = FirstVPBBForBB;
8793     Builder.setInsertPoint(VPBB);
8794 
8795     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
8797     for (Instruction &I : BB->instructionsWithoutDebug()) {
8798       Instruction *Instr = &I;
8799 
8800       // First filter out irrelevant instructions, to ensure no recipes are
8801       // built for them.
8802       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8803         continue;
8804 
8805       if (auto Recipe =
8806               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8807         for (auto *Def : Recipe->definedValues()) {
8808           auto *UV = Def->getUnderlyingValue();
8809           Plan->addVPValue(UV, Def);
8810         }
8811 
8812         RecipeBuilder.setRecipe(Instr, Recipe);
8813         VPBB->appendRecipe(Recipe);
8814         continue;
8815       }
8816 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
8819       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8820           Instr, Range, VPBB, PredInst2Recipe, Plan);
8821       if (NextVPBB != VPBB) {
8822         VPBB = NextVPBB;
8823         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8824                                     : "");
8825       }
8826     }
8827   }
8828 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
8832   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8833   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8834   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8835   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8836   delete PreEntry;
8837 
8838   // ---------------------------------------------------------------------------
8839   // Transform initial VPlan: Apply previously taken decisions, in order, to
8840   // bring the VPlan to its final state.
8841   // ---------------------------------------------------------------------------
8842 
8843   // Apply Sink-After legal constraints.
8844   for (auto &Entry : SinkAfter) {
8845     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8846     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8847     // If the target is in a replication region, make sure to move Sink to the
8848     // block after it, not into the replication region itself.
8849     if (auto *Region =
8850             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8851       if (Region->isReplicator()) {
8852         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8853         VPBasicBlock *NextBlock =
8854             cast<VPBasicBlock>(Region->getSuccessors().front());
8855         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8856         continue;
8857       }
8858     }
8859     Sink->moveAfter(Target);
8860   }
8861 
8862   // Interleave memory: for each Interleave Group we marked earlier as relevant
8863   // for this VPlan, replace the Recipes widening its memory instructions with a
8864   // single VPInterleaveRecipe at its insertion point.
8865   for (auto IG : InterleaveGroups) {
8866     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8867         RecipeBuilder.getRecipe(IG->getInsertPos()));
8868     SmallVector<VPValue *, 4> StoredValues;
8869     for (unsigned i = 0; i < IG->getFactor(); ++i)
8870       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8871         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8872 
8873     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8874                                         Recipe->getMask());
8875     VPIG->insertBefore(Recipe);
8876     unsigned J = 0;
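    // Redirect all uses of each member's value to the corresponding value
    // defined by the interleave recipe, then erase the now-redundant member
    // recipes.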
8877     for (unsigned i = 0; i < IG->getFactor(); ++i)
8878       if (Instruction *Member = IG->getMember(i)) {
8879         if (!Member->getType()->isVoidTy()) {
8880           VPValue *OriginalV = Plan->getVPValue(Member);
8881           Plan->removeVPValueFor(Member);
8882           Plan->addVPValue(Member, VPIG->getVPValue(J));
8883           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8884           J++;
8885         }
8886         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8887       }
8888   }
8889 
8890   // Adjust the recipes for any inloop reductions.
8891   if (Range.Start.isVector())
8892     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8893 
8894   // Finally, if tail is folded by masking, introduce selects between the phi
8895   // and the live-out instruction of each reduction, at the end of the latch.
8896   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8897     Builder.setInsertPoint(VPBB);
8898     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8899     for (auto &Reduction : Legal->getReductionVars()) {
8900       if (CM.isInLoopReduction(Reduction.first))
8901         continue;
8902       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
8903       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
8904       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
8905     }
8906   }
8907 
8908   std::string PlanName;
8909   raw_string_ostream RSO(PlanName);
8910   ElementCount VF = Range.Start;
8911   Plan->addVF(VF);
8912   RSO << "Initial VPlan for VF={" << VF;
8913   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
8914     Plan->addVF(VF);
8915     RSO << "," << VF;
8916   }
8917   RSO << "},UF>=1";
8918   RSO.flush();
8919   Plan->setName(PlanName);
8920 
8921   return Plan;
8922 }
8923 
8924 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
8925   // Outer loop handling: They may require CFG and instruction level
8926   // transformations before even evaluating whether vectorization is profitable.
8927   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8928   // the vectorization pipeline.
8929   assert(!OrigLoop->isInnermost());
8930   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8931 
8932   // Create new empty VPlan
8933   auto Plan = std::make_unique<VPlan>();
8934 
8935   // Build hierarchical CFG
8936   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8937   HCFGBuilder.buildHierarchicalCFG();
8938 
8939   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8940        VF *= 2)
8941     Plan->addVF(VF);
8942 
8943   if (EnableVPlanPredication) {
8944     VPlanPredicator VPP(*Plan);
8945     VPP.predicate();
8946 
8947     // Avoid running transformation to recipes until masked code generation in
8948     // VPlan-native path is in place.
8949     return Plan;
8950   }
8951 
8952   SmallPtrSet<Instruction *, 1> DeadInstructions;
8953   VPlanTransforms::VPInstructionsToVPRecipes(
8954       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8955   return Plan;
8956 }
8957 
// Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
8962 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8963     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8964   for (auto &Reduction : CM.getInLoopReductionChains()) {
8965     PHINode *Phi = Reduction.first;
8966     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8967     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8968 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
8973     Instruction *Chain = Phi;
8974     for (Instruction *R : ReductionOperations) {
8975       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8976       RecurKind Kind = RdxDesc.getRecurrenceKind();
8977 
8978       VPValue *ChainOp = Plan->getVPValue(Chain);
8979       unsigned FirstOpId;
8980       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8981         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8982                "Expected to replace a VPWidenSelectSC");
8983         FirstOpId = 1;
8984       } else {
8985         assert(isa<VPWidenRecipe>(WidenRecipe) &&
8986                "Expected to replace a VPWidenSC");
8987         FirstOpId = 0;
8988       }
8989       unsigned VecOpId =
8990           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8991       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8992 
8993       auto *CondOp = CM.foldTailByMasking()
8994                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8995                          : nullptr;
8996       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8997           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8998       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8999       Plan->removeVPValueFor(R);
9000       Plan->addVPValue(R, RedRecipe);
9001       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9003       WidenRecipe->eraseFromParent();
9004 
9005       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9006         VPRecipeBase *CompareRecipe =
9007             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9008         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9009                "Expected to replace a VPWidenSC");
9010         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9011                "Expected no remaining users");
9012         CompareRecipe->eraseFromParent();
9013       }
9014       Chain = R;
9015     }
9016   }
9017 }
9018 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
9023 
9024 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
9025     Value *V, const VPIteration &Instance) {
9026   return ILV.getOrCreateScalarValue(V, Instance);
9027 }
9028 
9029 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9030                                VPSlotTracker &SlotTracker) const {
9031   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9032   IG->getInsertPos()->printAsOperand(O, false);
9033   O << ", ";
9034   getAddr()->printAsOperand(O, SlotTracker);
9035   VPValue *Mask = getMask();
9036   if (Mask) {
9037     O << ", ";
9038     Mask->printAsOperand(O, SlotTracker);
9039   }
9040   for (unsigned i = 0; i < IG->getFactor(); ++i)
9041     if (Instruction *I = IG->getMember(i))
9042       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
9043 }
9044 
9045 void VPWidenCallRecipe::execute(VPTransformState &State) {
9046   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9047                                   *this, State);
9048 }
9049 
9050 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9051   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9052                                     this, *this, InvariantCond, State);
9053 }
9054 
9055 void VPWidenRecipe::execute(VPTransformState &State) {
9056   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9057 }
9058 
9059 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9060   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9061                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9062                       IsIndexLoopInvariant, State);
9063 }
9064 
9065 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9066   assert(!State.Instance && "Int or FP induction being replicated.");
9067   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9068                                    getTruncInst(), getVPValue(0),
9069                                    getCastValue(), State);
9070 }
9071 
9072 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9073   Value *StartV =
9074       getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
9075   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9076                                  StartV, this, State);
9077 }
9078 
9079 void VPBlendRecipe::execute(VPTransformState &State) {
9080   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9081   // We know that all PHIs in non-header blocks are converted into
9082   // selects, so we don't have to worry about the insertion order and we
9083   // can just use the builder.
9084   // At this point we generate the predication tree. There may be
9085   // duplications since this is a simple recursive scan, but future
9086   // optimizations will clean it up.
9087 
9088   unsigned NumIncoming = getNumIncomingValues();
9089 
9090   // Generate a sequence of selects of the form:
9091   // SELECT(Mask3, In3,
9092   //        SELECT(Mask2, In2,
9093   //               SELECT(Mask1, In1,
9094   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi,
  // and which are essentially undef, are taken from In0.
9097   InnerLoopVectorizer::VectorParts Entry(State.UF);
9098   for (unsigned In = 0; In < NumIncoming; ++In) {
9099     for (unsigned Part = 0; Part < State.UF; ++Part) {
9100       // We might have single edge PHIs (blocks) - use an identity
9101       // 'select' for the first PHI operand.
9102       Value *In0 = State.get(getIncomingValue(In), Part);
9103       if (In == 0)
9104         Entry[Part] = In0; // Initialize with the first incoming value.
9105       else {
9106         // Select between the current value and the previous incoming edge
9107         // based on the incoming mask.
9108         Value *Cond = State.get(getMask(In), Part);
9109         Entry[Part] =
9110             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9111       }
9112     }
9113   }
9114   for (unsigned Part = 0; Part < State.UF; ++Part)
9115     State.set(this, Phi, Entry[Part], Part);
9116 }
9117 
9118 void VPInterleaveRecipe::execute(VPTransformState &State) {
9119   assert(!State.Instance && "Interleave group being replicated.");
9120   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9121                                       getStoredValues(), getMask());
9122 }
9123 
9124 void VPReductionRecipe::execute(VPTransformState &State) {
9125   assert(!State.Instance && "Reduction being replicated.");
9126   for (unsigned Part = 0; Part < State.UF; ++Part) {
9127     RecurKind Kind = RdxDesc->getRecurrenceKind();
9128     Value *NewVecOp = State.get(getVecOp(), Part);
9129     if (VPValue *Cond = getCondOp()) {
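      // The reduction is predicated: substitute the recurrence identity for
      // masked-off lanes so they do not affect the reduced value.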
9130       Value *NewCond = State.get(Cond, Part);
9131       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9132       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9133           Kind, VecTy->getElementType());
9134       Constant *IdenVec =
9135           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9136       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9137       NewVecOp = Select;
9138     }
9139     Value *NewRed =
9140         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9141     Value *PrevInChain = State.get(getChainOp(), Part);
9142     Value *NextInChain;
9143     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9144       NextInChain =
9145           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9146                          NewRed, PrevInChain);
9147     } else {
9148       NextInChain = State.Builder.CreateBinOp(
9149           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9150           PrevInChain);
9151     }
9152     State.set(this, getUnderlyingInstr(), NextInChain, Part);
9153   }
9154 }
9155 
9156 void VPReplicateRecipe::execute(VPTransformState &State) {
9157   if (State.Instance) { // Generate a single instance.
9158     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9159     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9160                                     *State.Instance, IsPredicated, State);
9161     // Insert scalar instance packing it into a vector.
9162     if (AlsoPack && State.VF.isVector()) {
9163       // If we're constructing lane 0, initialize to start from poison.
9164       if (State.Instance->Lane == 0) {
9165         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9166         Value *Poison = PoisonValue::get(
9167             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9168         State.set(this, getUnderlyingInstr(), Poison, State.Instance->Part);
9169       }
9170       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9171     }
9172     return;
9173   }
9174 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9178   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9179   assert((!State.VF.isScalable() || IsUniform) &&
9180          "Can't scalarize a scalable vector");
9181   for (unsigned Part = 0; Part < State.UF; ++Part)
9182     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9183       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9184                                       VPIteration(Part, Lane), IsPredicated,
9185                                       State);
9186 }
9187 
9188 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9189   assert(State.Instance && "Branch on Mask works only on single instance.");
9190 
9191   unsigned Part = State.Instance->Part;
9192   unsigned Lane = State.Instance->Lane;
9193 
9194   Value *ConditionBit = nullptr;
9195   VPValue *BlockInMask = getMask();
9196   if (BlockInMask) {
9197     ConditionBit = State.get(BlockInMask, Part);
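    // If the mask is a vector, extract the bit for the lane currently being
    // generated.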
9198     if (ConditionBit->getType()->isVectorTy())
9199       ConditionBit = State.Builder.CreateExtractElement(
9200           ConditionBit, State.Builder.getInt32(Lane));
9201   } else // Block in mask is all-one.
9202     ConditionBit = State.Builder.getTrue();
9203 
9204   // Replace the temporary unreachable terminator with a new conditional branch,
9205   // whose two destinations will be set later when they are created.
9206   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9207   assert(isa<UnreachableInst>(CurrentTerminator) &&
9208          "Expected to replace unreachable terminator with conditional branch.");
9209   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9210   CondBr->setSuccessor(0, nullptr);
9211   ReplaceInstWithInst(CurrentTerminator, CondBr);
9212 }
9213 
9214 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9215   assert(State.Instance && "Predicated instruction PHI works per instance.");
9216   Instruction *ScalarPredInst =
9217       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9218   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9219   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9220   assert(PredicatingBB && "Predicated block has no single predecessor.");
9221   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9222          "operand must be VPReplicateRecipe");
9223 
9224   // By current pack/unpack logic we need to generate only a single phi node: if
9225   // a vector value for the predicated instruction exists at this point it means
9226   // the instruction has vector users only, and a phi for the vector value is
9227   // needed. In this case the recipe of the predicated instruction is marked to
9228   // also do that packing, thereby "hoisting" the insert-element sequence.
9229   // Otherwise, a phi node for the scalar value is needed.
9230   unsigned Part = State.Instance->Part;
9231   if (State.hasVectorValue(getOperand(0), Part)) {
9232     Value *VectorValue = State.get(getOperand(0), Part);
9233     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9234     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9235     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9236     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9237     if (State.hasVectorValue(this, Part))
9238       State.reset(this, VPhi, Part);
9239     else
9240       State.set(this, VPhi, Part);
9241     // NOTE: Currently we need to update the value of the operand, so the next
9242     // predicated iteration inserts its generated value in the correct vector.
9243     State.reset(getOperand(0), VPhi, Part);
9244   } else {
9245     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9246     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9247     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9248                      PredicatingBB);
9249     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9250     if (State.hasScalarValue(this, *State.Instance))
9251       State.reset(this, Phi, *State.Instance);
9252     else
9253       State.set(this, Phi, *State.Instance);
9254     // NOTE: Currently we need to update the value of the operand, so the next
9255     // predicated iteration inserts its generated value in the correct vector.
9256     State.reset(getOperand(0), Phi, *State.Instance);
9257   }
9258 }
9259 
9260 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9261   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9262   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9263                                         StoredValue ? nullptr : getVPValue(),
9264                                         getAddr(), StoredValue, getMask());
9265 }
9266 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predication compiler options, 3) loop hints
// forcing predication, and 4) a TTI hook that analyzes whether the loop is
// suitable for predication.
9271 static ScalarEpilogueLowering getScalarEpilogueLowering(
9272     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9273     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9274     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9275     LoopVectorizationLegality &LVL) {
9276   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9277   // don't look at hints or options, and don't request a scalar epilogue.
9278   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9279   // LoopAccessInfo (due to code dependency and not being able to reliably get
9280   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9281   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9282   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9283   // back to the old way and vectorize with versioning when forced. See D81345.)
9284   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9285                                                       PGSOQueryType::IRPass) &&
9286                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9287     return CM_ScalarEpilogueNotAllowedOptSize;
9288 
9289   // 2) If set, obey the directives
9290   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9291     switch (PreferPredicateOverEpilogue) {
9292     case PreferPredicateTy::ScalarEpilogue:
9293       return CM_ScalarEpilogueAllowed;
9294     case PreferPredicateTy::PredicateElseScalarEpilogue:
9295       return CM_ScalarEpilogueNotNeededUsePredicate;
9296     case PreferPredicateTy::PredicateOrDontVectorize:
9297       return CM_ScalarEpilogueNotAllowedUsePredicate;
9298     };
9299   }
9300 
9301   // 3) If set, obey the hints
9302   switch (Hints.getPredicate()) {
9303   case LoopVectorizeHints::FK_Enabled:
9304     return CM_ScalarEpilogueNotNeededUsePredicate;
9305   case LoopVectorizeHints::FK_Disabled:
9306     return CM_ScalarEpilogueAllowed;
9307   };
9308 
9309   // 4) if the TTI hook indicates this is profitable, request predication.
9310   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9311                                        LVL.getLAI()))
9312     return CM_ScalarEpilogueNotNeededUsePredicate;
9313 
9314   return CM_ScalarEpilogueAllowed;
9315 }
9316 
9317 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9318                            const VPIteration &Instance) {
9319   set(Def, V, Instance);
9320   ILV->setScalarValue(IRDef, Instance, V);
9321 }
9322 
9323 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9324                            unsigned Part) {
9325   set(Def, V, Part);
9326   ILV->setVectorValue(IRDef, Part, V);
9327 }
9328 
9329 void VPTransformState::reset(VPValue *Def, Value *IRDef, Value *V,
9330                              unsigned Part) {
9331   set(Def, V, Part);
9332   ILV->resetVectorValue(IRDef, Part, V);
9333 }
9334 
9335 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9337   if (hasVectorValue(Def, Part))
9338     return Data.PerPartOutput[Def][Part];
9339 
9340   if (!hasScalarValue(Def, {Part, 0}))
9341     return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part);
9342 
9343   Value *ScalarValue = get(Def, {Part, 0});
9344   // If we aren't vectorizing, we can just copy the scalar map values over
9345   // to the vector map.
9346   if (VF.isScalar()) {
9347     set(Def, ScalarValue, Part);
9348     return ScalarValue;
9349   }
9350 
9351   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9352   bool IsUniform = RepR && RepR->isUniform();
9353 
9354   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9355   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9356 
9357   // Set the insert point after the last scalarized instruction. This
9358   // ensures the insertelement sequence will directly follow the scalar
9359   // definitions.
9360   auto OldIP = Builder.saveIP();
9361   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9362   Builder.SetInsertPoint(&*NewIP);
9363 
9364   // However, if we are vectorizing, we need to construct the vector values.
9365   // If the value is known to be uniform after vectorization, we can just
9366   // broadcast the scalar value corresponding to lane zero for each unroll
9367   // iteration. Otherwise, we construct the vector values using
9368   // insertelement instructions. Since the resulting vectors are stored in
9369   // VectorLoopValueMap, we will only generate the insertelements once.
9370   Value *VectorValue = nullptr;
9371   if (IsUniform) {
9372     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9373     set(Def, VectorValue, Part);
9374   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9379     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9380       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9381     VectorValue = get(Def, Part);
9382   }
9383   Builder.restoreIP(OldIP);
9384   return VectorValue;
9385 }
9386 
9387 // Process the loop in the VPlan-native vectorization path. This path builds
9388 // VPlan upfront in the vectorization pipeline, which allows to apply
9389 // VPlan-to-VPlan transformations from the very beginning without modifying the
9390 // input LLVM IR.
9391 static bool processLoopInVPlanNativePath(
9392     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9393     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9394     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9395     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9396     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9397 
9398   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9399     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9400     return false;
9401   }
9402   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9403   Function *F = L->getHeader()->getParent();
9404   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9405 
9406   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9407       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9408 
9409   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9410                                 &Hints, IAI);
9411   // Use the planner for outer loop vectorization.
9412   // TODO: CM is not used at this point inside the planner. Turn CM into an
9413   // optional argument if we don't need it in the future.
9414   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9415 
9416   // Get user vectorization factor.
9417   ElementCount UserVF = Hints.getWidth();
9418 
9419   // Plan how to best vectorize, return the best VF and its cost.
9420   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9421 
9422   // If we are stress testing VPlan builds, do not attempt to generate vector
9423   // code. Masked vector code generation support will follow soon.
9424   // Also, do not attempt to vectorize if no vector code will be produced.
9425   if (VPlanBuildStressTest || EnableVPlanPredication ||
9426       VectorizationFactor::Disabled() == VF)
9427     return false;
9428 
9429   LVP.setBestPlan(VF.Width, 1);
9430 
9431   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9432                          &CM, BFI, PSI);
9433   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9434                     << L->getHeader()->getParent()->getName() << "\"\n");
9435   LVP.executePlan(LB, DT);
9436 
9437   // Mark the loop as already vectorized to avoid vectorizing again.
9438   Hints.setAlreadyVectorized();
9439 
9440   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9441   return true;
9442 }
9443 
9444 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with floating point, there
9446 // will be a performance penalty from the conversion overhead and the change in
9447 // the vector width.
9448 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9449   SmallVector<Instruction *, 4> Worklist;
9450   for (BasicBlock *BB : L->getBlocks()) {
9451     for (Instruction &Inst : *BB) {
9452       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9453         if (S->getValueOperand()->getType()->isFloatTy())
9454           Worklist.push_back(S);
9455       }
9456     }
9457   }
9458 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9461   SmallPtrSet<const Instruction *, 4> Visited;
9462   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9463   while (!Worklist.empty()) {
9464     auto *I = Worklist.pop_back_val();
9465     if (!L->contains(I))
9466       continue;
9467     if (!Visited.insert(I).second)
9468       continue;
9469 
9470     // Emit a remark if the floating point store required a floating
9471     // point conversion.
9472     // TODO: More work could be done to identify the root cause such as a
9473     // constant or a function return type and point the user to it.
9474     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9475       ORE->emit([&]() {
9476         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9477                                           I->getDebugLoc(), L->getHeader())
9478                << "floating point conversion changes vector width. "
9479                << "Mixed floating point precision requires an up/down "
9480                << "cast that will negatively impact performance.";
9481       });
9482 
9483     for (Use &Op : I->operands())
9484       if (auto *OpI = dyn_cast<Instruction>(Op))
9485         Worklist.push_back(OpI);
9486   }
9487 }
9488 
9489 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9490     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9491                                !EnableLoopInterleaving),
9492       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9493                               !EnableLoopVectorization) {}
9494 
9495 bool LoopVectorizePass::processLoop(Loop *L) {
9496   assert((EnableVPlanNativePath || L->isInnermost()) &&
9497          "VPlan-native path is not enabled. Only process inner loops.");
9498 
9499 #ifndef NDEBUG
9500   const std::string DebugLocStr = getDebugLocString(L);
9501 #endif /* NDEBUG */
9502 
9503   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9504                     << L->getHeader()->getParent()->getName() << "\" from "
9505                     << DebugLocStr << "\n");
9506 
9507   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9508 
9509   LLVM_DEBUG(
9510       dbgs() << "LV: Loop hints:"
9511              << " force="
9512              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9513                      ? "disabled"
9514                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9515                             ? "enabled"
9516                             : "?"))
9517              << " width=" << Hints.getWidth()
9518              << " unroll=" << Hints.getInterleave() << "\n");
9519 
9520   // Function containing loop
9521   Function *F = L->getHeader()->getParent();
9522 
9523   // Looking at the diagnostic output is the only way to determine if a loop
9524   // was vectorized (other than looking at the IR or machine code), so it
9525   // is important to generate an optimization remark for each loop. Most of
9526   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
9530 
9531   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9532     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9533     return false;
9534   }
9535 
9536   PredicatedScalarEvolution PSE(*SE, *L);
9537 
9538   // Check if it is legal to vectorize the loop.
9539   LoopVectorizationRequirements Requirements(*ORE);
9540   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9541                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9542   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9543     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9544     Hints.emitRemarkWithHints();
9545     return false;
9546   }
9547 
9548   // Check the function attributes and profiles to find out if this function
9549   // should be optimized for size.
9550   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9551       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9552 
9553   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9554   // here. They may require CFG and instruction level transformations before
9555   // even evaluating whether vectorization is profitable. Since we cannot modify
9556   // the incoming IR, we need to build VPlan upfront in the vectorization
9557   // pipeline.
9558   if (!L->isInnermost())
9559     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9560                                         ORE, BFI, PSI, Hints);
9561 
9562   assert(L->isInnermost() && "Inner loop expected.");
9563 
9564   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9565   // count by optimizing for size, to minimize overheads.
9566   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9567   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9568     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9569                       << "This loop is worth vectorizing only if no scalar "
9570                       << "iteration overheads are incurred.");
9571     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9572       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9573     else {
9574       LLVM_DEBUG(dbgs() << "\n");
9575       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9576     }
9577   }
9578 
9579   // Check the function attributes to see if implicit floats are allowed.
9580   // FIXME: This check doesn't seem possibly correct -- what if the loop is
9581   // an integer loop and the vector instructions selected are purely integer
9582   // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

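  // If no plan was produced, fall back to a disabled vectorization factor and
  // an interleave count of 1; the checks below turn that into user-facing
  // diagnostics.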
  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

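  // Commit the selected vectorization factor and interleave count to the
  // planner; code generation below executes the corresponding VPlan.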
  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
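  // Set below when no runtime safety checks were emitted; used at the end to
  // attach metadata that disables runtime unrolling of the scalar remainder
  // loop.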
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that vectorizing the loop is not beneficial, then
    // interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is worthwhile to vectorize the loop, then do it.

    // Consider vectorizing the epilogue too if it's profitable.
    VectorizationFactor EpilogueVF =
      CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
    if (EpilogueVF.Width.isVector()) {

      // The first pass vectorizes the main loop and creates a scalar epilogue
      // to be vectorized by executing the plan (potentially with a different
      // factor) again shortly afterwards.
      EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                        EpilogueVF.Width.getKnownMinValue(), 1);
      EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
                                         &LVL, &CM, BFI, PSI);

      LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
      LVP.executePlan(MainILV, DT);
      ++LoopsVectorized;

      simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
      formLCSSARecursively(*L, *DT, LI, SE);

      // Second pass vectorizes the epilogue and adjusts the control flow
      // edges from the first pass.
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
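      // Reuse the same EPI descriptor for the second pass, substituting the
      // epilogue factors for the "main" ones so the epilogue vectorizer below
      // works with the epilogue VF and UF.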
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks on strides and memory. A scalar loop that
      // is rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

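  // If the original loop carried "followup" metadata for the remainder loop,
  // propagate it; otherwise mark the remaining loop as already vectorized and,
  // when no runtime checks were added, disable its runtime unrolling.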
  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
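  // LoopAccessInfo is a loop analysis; fetch it lazily, per loop, through the
  // loop analysis manager proxy.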
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}