1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
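//
// For illustration (not a literal description of the generated IR), a loop
//   for (i = 0; i < n; ++i)  A[i] = B[i] + 42;
// is conceptually rewritten for a vectorization factor (VF) of 4 as
//   for (i = 0; i + 3 < n; i += 4)  A[i..i+3] = B[i..i+3] + <42,42,42,42>;
// with the remaining iterations executed by a scalar epilogue loop (or folded
// into the vector body when tail-folding/predication is used).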
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is a development effort going on to migrate the loop vectorizer to
// the VPlan infrastructure and to introduce outer loop vectorization support
// (see docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and the enum below lists the
// options: the vectorizer will try to fold the tail loop (epilogue) into the
// vector body and predicate the instructions accordingly. If tail-folding
// fails, the chosen value selects the fallback strategy:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar "
                          "epilogue."),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
332                                     "overriding the targets preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
342         "Prefer predicating a reduction operation over an after loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
378 /// A helper function that returns true if the given type is irregular. The
379 /// type is irregular if its allocated size doesn't equal the store size of an
380 /// element of the corresponding vector type.
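/// For example (assuming a typical data layout), x86_fp80 has a type size of
/// 80 bits but is usually allocated 96 or 128 bits including padding, so it
/// is considered irregular here.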
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
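/// For example, the current return value of 2 models a predicated block that
/// executes on roughly every other iteration of the header, so callers scale
/// such a block's cost by 1/2.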
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 /// InnerLoopVectorizer vectorizes loops which contain only one basic
432 /// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or into multiple
/// scalar copies. This class also implements the following features:
435 /// * It inserts an epilogue loop for handling loops that don't have iteration
436 ///   counts that are known to be a multiple of the vectorization factor.
437 /// * It handles the code generation for reduction variables.
438 /// * Scalarization (implementation using scalars) of un-vectorizable
439 ///   instructions.
440 /// InnerLoopVectorizer does not perform any vectorization-legality
441 /// checks, and relies on the caller to check for the different legality
442 /// aspects. The InnerLoopVectorizer relies on the
443 /// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
445 class InnerLoopVectorizer {
446 public:
447   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
448                       LoopInfo *LI, DominatorTree *DT,
449                       const TargetLibraryInfo *TLI,
450                       const TargetTransformInfo *TTI, AssumptionCache *AC,
451                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
452                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
453                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
454                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
455       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
456         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
457         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
458         PSI(PSI), RTChecks(RTChecks) {
459     // Query this against the original loop and save it here because the profile
460     // of the original loop header may change as the transformation happens.
461     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
462         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
463   }
464 
465   virtual ~InnerLoopVectorizer() = default;
466 
467   /// Create a new empty loop that will contain vectorized instructions later
468   /// on, while the old loop will be used as the scalar remainder. Control flow
469   /// is generated around the vectorized (and scalar epilogue) loops consisting
470   /// of various checks and bypasses. Return the pre-header block of the new
471   /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
473   /// handle the more complex control flow around the loops.
474   virtual BasicBlock *createVectorizedLoopSkeleton();
475 
476   /// Widen a single instruction within the innermost loop.
477   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
478                         VPTransformState &State);
479 
480   /// Widen a single call instruction within the innermost loop.
481   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
482                             VPTransformState &State);
483 
484   /// Widen a single select instruction within the innermost loop.
485   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
486                               bool InvariantCond, VPTransformState &State);
487 
  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
489   void fixVectorizedLoop(VPTransformState &State);
490 
491   // Return true if any runtime check is added.
492   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
493 
494   /// A type for vectorized values in the new loop. Each value from the
495   /// original loop, when vectorized, is represented by UF vector values in the
496   /// new unrolled loop, where UF is the unroll factor.
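  /// For example, with UF = 2 and VF = 4, an i32 value from the original loop
  /// is represented by two <4 x i32> values in the vectorized loop.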
497   using VectorParts = SmallVector<Value *, 2>;
498 
499   /// Vectorize a single GetElementPtrInst based on information gathered and
500   /// decisions taken during planning.
501   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
502                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
503                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
504 
505   /// Vectorize a single first-order recurrence or pointer induction PHINode in
506   /// a block. This method handles the induction variable canonicalization. It
507   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
508   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
509                            VPTransformState &State);
510 
511   /// A helper function to scalarize a single Instruction in the innermost loop.
512   /// Generates a sequence of scalar instances for each lane between \p MinLane
513   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
514   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
515   /// Instr's operands.
516   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
517                             const VPIteration &Instance, bool IfPredicateInstr,
518                             VPTransformState &State);
519 
520   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
521   /// is provided, the integer induction variable will first be truncated to
522   /// the corresponding type.
523   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
524                              VPValue *Def, VPValue *CastDef,
525                              VPTransformState &State);
526 
527   /// Construct the vector value of a scalarized value \p V one lane at a time.
528   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
529                                  VPTransformState &State);
530 
531   /// Try to vectorize interleaved access group \p Group with the base address
532   /// given in \p Addr, optionally masking the vector operations if \p
533   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
534   /// values in the vectorized loop.
535   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
536                                 ArrayRef<VPValue *> VPDefs,
537                                 VPTransformState &State, VPValue *Addr,
538                                 ArrayRef<VPValue *> StoredValues,
539                                 VPValue *BlockInMask = nullptr);
540 
541   /// Vectorize Load and Store instructions with the base address given in \p
542   /// Addr, optionally masking the vector operations if \p BlockInMask is
543   /// non-null. Use \p State to translate given VPValues to IR values in the
544   /// vectorized loop.
545   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
546                                   VPValue *Def, VPValue *Addr,
547                                   VPValue *StoredValue, VPValue *BlockInMask);
548 
  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None, the class member's
  /// Builder is used.
551   void setDebugLocFromInst(const Value *V,
552                            Optional<IRBuilder<> *> CustomBuilder = None);
553 
554   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
555   void fixNonInductionPHIs(VPTransformState &State);
556 
557   /// Returns true if the reordering of FP operations is not allowed, but we are
558   /// able to vectorize with strict in-order reductions for the given RdxDesc.
559   bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);
560 
561   /// Create a broadcast instruction. This method generates a broadcast
562   /// instruction (shuffle) for loop invariant values and for the induction
563   /// value. If this is the induction variable then we extend it to N, N+1, ...
564   /// this is needed because each iteration in the loop corresponds to a SIMD
565   /// element.
566   virtual Value *getBroadcastInstrs(Value *V);
567 
568 protected:
569   friend class LoopVectorizationPlanner;
570 
571   /// A small list of PHINodes.
572   using PhiVector = SmallVector<PHINode *, 4>;
573 
574   /// A type for scalarized values in the new loop. Each value from the
575   /// original loop, when scalarized, is represented by UF x VF scalar values
576   /// in the new unrolled loop, where UF is the unroll factor and VF is the
577   /// vectorization factor.
578   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
579 
580   /// Set up the values of the IVs correctly when exiting the vector loop.
581   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
582                     Value *CountRoundDown, Value *EndValue,
583                     BasicBlock *MiddleBlock);
584 
585   /// Create a new induction variable inside L.
586   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
587                                    Value *Step, Instruction *DL);
588 
589   /// Handle all cross-iteration phis in the header.
590   void fixCrossIterationPHIs(VPTransformState &State);
591 
592   /// Create the exit value of first order recurrences in the middle block and
593   /// update their users.
594   void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);
595 
596   /// Create code for the loop exit value of the reduction.
597   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
598 
599   /// Clear NSW/NUW flags from reduction instructions if necessary.
600   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
601                                VPTransformState &State);
602 
603   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
604   /// means we need to add the appropriate incoming value from the middle
605   /// block as exiting edges from the scalar epilogue loop (if present) are
606   /// already in place, and we exit the vector loop exclusively to the middle
607   /// block.
608   void fixLCSSAPHIs(VPTransformState &State);
609 
610   /// Iteratively sink the scalarized operands of a predicated instruction into
611   /// the block that was created for it.
612   void sinkScalarOperands(Instruction *PredInst);
613 
614   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
615   /// represented as.
616   void truncateToMinimalBitwidths(VPTransformState &State);
617 
618   /// This function adds
619   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
620   /// to each vector element of Val. The sequence starts at StartIndex.
621   /// \p Opcode is relevant for FP induction variable.
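  /// For example (illustrative), with StartIdx = 0, Step = 1 and Val a splat
  /// of an integer %iv, the result is roughly
  /// <%iv + 0, %iv + 1, %iv + 2, ...> up to the vector width.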
622   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
623                                Instruction::BinaryOps Opcode =
624                                Instruction::BinaryOpsEnd);
625 
626   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
627   /// variable on which to base the steps, \p Step is the size of the step, and
628   /// \p EntryVal is the value from the original loop that maps to the steps.
629   /// Note that \p EntryVal doesn't have to be an induction variable - it
630   /// can also be a truncate instruction.
631   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
632                         const InductionDescriptor &ID, VPValue *Def,
633                         VPValue *CastDef, VPTransformState &State);
634 
635   /// Create a vector induction phi node based on an existing scalar one. \p
636   /// EntryVal is the value from the original loop that maps to the vector phi
637   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
638   /// truncate instruction, instead of widening the original IV, we widen a
639   /// version of the IV truncated to \p EntryVal's type.
640   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
641                                        Value *Step, Value *Start,
642                                        Instruction *EntryVal, VPValue *Def,
643                                        VPValue *CastDef,
644                                        VPTransformState &State);
645 
646   /// Returns true if an instruction \p I should be scalarized instead of
647   /// vectorized for the chosen vectorization factor.
648   bool shouldScalarizeInstruction(Instruction *I) const;
649 
650   /// Returns true if we should generate a scalar version of \p IV.
651   bool needsScalarInduction(Instruction *IV) const;
652 
653   /// If there is a cast involved in the induction variable \p ID, which should
654   /// be ignored in the vectorized loop body, this function records the
655   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
656   /// cast. We had already proved that the casted Phi is equal to the uncasted
657   /// Phi in the vectorized loop (under a runtime guard), and therefore
658   /// there is no need to vectorize the cast - the same value can be used in the
659   /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
661   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
662   ///
663   /// \p EntryVal is the value from the original loop that maps to the vector
664   /// phi node and is used to distinguish what is the IV currently being
665   /// processed - original one (if \p EntryVal is a phi corresponding to the
666   /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
668   /// latter case \p EntryVal is a TruncInst and we must not record anything for
669   /// that IV, but it's error-prone to expect callers of this routine to care
670   /// about that, hence this explicit parameter.
671   void recordVectorLoopValueForInductionCast(
672       const InductionDescriptor &ID, const Instruction *EntryVal,
673       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
674       unsigned Part, unsigned Lane = UINT_MAX);
675 
676   /// Generate a shuffle sequence that will reverse the vector Vec.
677   virtual Value *reverseVector(Value *Vec);
678 
679   /// Returns (and creates if needed) the original loop trip count.
680   Value *getOrCreateTripCount(Loop *NewLoop);
681 
682   /// Returns (and creates if needed) the trip count of the widened loop.
683   Value *getOrCreateVectorTripCount(Loop *NewLoop);
684 
685   /// Returns a bitcasted value to the requested vector type.
686   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
687   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
688                                 const DataLayout &DL);
689 
690   /// Emit a bypass check to see if the vector trip count is zero, including if
691   /// it overflows.
692   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
693 
694   /// Emit a bypass check to see if all of the SCEV assumptions we've
695   /// had to make are correct. Returns the block containing the checks or
696   /// nullptr if no checks have been added.
697   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
698 
699   /// Emit bypass checks to check any memory assumptions we may have made.
700   /// Returns the block containing the checks or nullptr if no checks have been
701   /// added.
702   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
703 
704   /// Compute the transformed value of Index at offset StartValue using step
705   /// StepValue.
706   /// For integer induction, returns StartValue + Index * StepValue.
707   /// For pointer induction, returns StartValue[Index * StepValue].
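  /// For example (illustrative), an integer induction with StartValue = 10 and
  /// StepValue = 3 gives 10 + 4 * 3 = 22 for Index = 4, while a pointer
  /// induction gives the address of StartValue[4 * StepValue] instead.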
708   /// FIXME: The newly created binary instructions should contain nsw/nuw
709   /// flags, which can be found from the original scalar operations.
710   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
711                               const DataLayout &DL,
712                               const InductionDescriptor &ID) const;
713 
714   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
715   /// vector loop preheader, middle block and scalar preheader. Also
716   /// allocate a loop object for the new vector loop and return it.
717   Loop *createVectorLoopSkeleton(StringRef Prefix);
718 
719   /// Create new phi nodes for the induction variables to resume iteration count
720   /// in the scalar epilogue, from where the vectorized loop left off (given by
721   /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
723   /// vectorization) and the resume values can come from an additional bypass
724   /// block, the \p AdditionalBypass pair provides information about the bypass
725   /// block and the end value on the edge from bypass to this loop.
726   void createInductionResumeValues(
727       Loop *L, Value *VectorTripCount,
728       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
729 
730   /// Complete the loop skeleton by adding debug MDs, creating appropriate
731   /// conditional branches in the middle block, preparing the builder and
732   /// running the verifier. Take in the vector loop \p L as argument, and return
733   /// the preheader of the completed vector loop.
734   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
735 
736   /// Add additional metadata to \p To that was not present on \p Orig.
737   ///
738   /// Currently this is used to add the noalias annotations based on the
739   /// inserted memchecks.  Use this for instructions that are *cloned* into the
740   /// vector loop.
741   void addNewMetadata(Instruction *To, const Instruction *Orig);
742 
743   /// Add metadata from one instruction to another.
744   ///
745   /// This includes both the original MDs from \p From and additional ones (\see
746   /// addNewMetadata).  Use this for *newly created* instructions in the vector
747   /// loop.
748   void addMetadata(Instruction *To, Instruction *From);
749 
750   /// Similar to the previous function but it adds the metadata to a
751   /// vector of instructions.
752   void addMetadata(ArrayRef<Value *> To, Instruction *From);
753 
754   /// Allow subclasses to override and print debug traces before/after vplan
755   /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}
758 
759   /// The original loop.
760   Loop *OrigLoop;
761 
762   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
763   /// dynamic knowledge to simplify SCEV expressions and converts them to a
764   /// more usable form.
765   PredicatedScalarEvolution &PSE;
766 
767   /// Loop Info.
768   LoopInfo *LI;
769 
770   /// Dominator Tree.
771   DominatorTree *DT;
772 
773   /// Alias Analysis.
774   AAResults *AA;
775 
776   /// Target Library Info.
777   const TargetLibraryInfo *TLI;
778 
779   /// Target Transform Info.
780   const TargetTransformInfo *TTI;
781 
782   /// Assumption Cache.
783   AssumptionCache *AC;
784 
785   /// Interface to emit optimization remarks.
786   OptimizationRemarkEmitter *ORE;
787 
788   /// LoopVersioning.  It's only set up (non-null) if memchecks were
789   /// used.
790   ///
791   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
793   std::unique_ptr<LoopVersioning> LVer;
794 
795   /// The vectorization SIMD factor to use. Each vector will have this many
796   /// vector elements.
797   ElementCount VF;
798 
799   /// The vectorization unroll factor to use. Each scalar is vectorized to this
800   /// many different vector instructions.
801   unsigned UF;
802 
803   /// The builder that we use
804   IRBuilder<> Builder;
805 
806   // --- Vectorization state ---
807 
808   /// The vector-loop preheader.
809   BasicBlock *LoopVectorPreHeader;
810 
811   /// The scalar-loop preheader.
812   BasicBlock *LoopScalarPreHeader;
813 
814   /// Middle Block between the vector and the scalar.
815   BasicBlock *LoopMiddleBlock;
816 
817   /// The unique ExitBlock of the scalar loop if one exists.  Note that
818   /// there can be multiple exiting edges reaching this block.
819   BasicBlock *LoopExitBlock;
820 
821   /// The vector loop body.
822   BasicBlock *LoopVectorBody;
823 
824   /// The scalar loop body.
825   BasicBlock *LoopScalarBody;
826 
827   /// A list of all bypass blocks. The first block is the entry of the loop.
828   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
829 
830   /// The new Induction variable which was added to the new block.
831   PHINode *Induction = nullptr;
832 
833   /// The induction variable of the old basic block.
834   PHINode *OldInduction = nullptr;
835 
836   /// Store instructions that were predicated.
837   SmallVector<Instruction *, 4> PredicatedInstructions;
838 
839   /// Trip count of the original loop.
840   Value *TripCount = nullptr;
841 
842   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
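  /// (e.g. TripCount = 10, VF = 4, UF = 1 gives VectorTripCount = 8).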
843   Value *VectorTripCount = nullptr;
844 
845   /// The legality analysis.
846   LoopVectorizationLegality *Legal;
847 
  /// The profitability analysis.
849   LoopVectorizationCostModel *Cost;
850 
851   // Record whether runtime checks are added.
852   bool AddedSafetyChecks = false;
853 
854   // Holds the end values for each induction variable. We save the end values
855   // so we can later fix-up the external users of the induction variables.
856   DenseMap<PHINode *, Value *> IVEndValues;
857 
858   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
859   // fixed up at the end of vector code generation.
860   SmallVector<PHINode *, 8> OrigPHIsToFix;
861 
862   /// BFI and PSI are used to check for profile guided size optimizations.
863   BlockFrequencyInfo *BFI;
864   ProfileSummaryInfo *PSI;
865 
866   // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
868   bool OptForSizeBasedOnProfile;
869 
870   /// Structure to hold information about generated runtime checks, responsible
871   /// for cleaning the checks, if vectorization turns out unprofitable.
872   GeneratedRTChecks &RTChecks;
873 };
874 
875 class InnerLoopUnroller : public InnerLoopVectorizer {
876 public:
877   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
878                     LoopInfo *LI, DominatorTree *DT,
879                     const TargetLibraryInfo *TLI,
880                     const TargetTransformInfo *TTI, AssumptionCache *AC,
881                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
882                     LoopVectorizationLegality *LVL,
883                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
884                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
885       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
886                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
887                             BFI, PSI, Check) {}
888 
889 private:
890   Value *getBroadcastInstrs(Value *V) override;
891   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
892                        Instruction::BinaryOps Opcode =
893                        Instruction::BinaryOpsEnd) override;
894   Value *reverseVector(Value *Vec) override;
895 };
896 
897 /// Encapsulate information regarding vectorization of a loop and its epilogue.
898 /// This information is meant to be updated and used across two stages of
899 /// epilogue vectorization.
900 struct EpilogueLoopVectorizationInfo {
901   ElementCount MainLoopVF = ElementCount::getFixed(0);
902   unsigned MainLoopUF = 0;
903   ElementCount EpilogueVF = ElementCount::getFixed(0);
904   unsigned EpilogueUF = 0;
905   BasicBlock *MainLoopIterationCountCheck = nullptr;
906   BasicBlock *EpilogueIterationCountCheck = nullptr;
907   BasicBlock *SCEVSafetyCheck = nullptr;
908   BasicBlock *MemSafetyCheck = nullptr;
909   Value *TripCount = nullptr;
910   Value *VectorTripCount = nullptr;
911 
912   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
913                                 unsigned EUF)
914       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
915         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
916     assert(EUF == 1 &&
917            "A high UF for the epilogue loop is likely not beneficial.");
918   }
919 };
920 
921 /// An extension of the inner loop vectorizer that creates a skeleton for a
922 /// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
926 /// deriving two concrete strategy classes from this base class and invoking
927 /// them in succession from the loop vectorizer planner.
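/// Roughly, the resulting skeleton is:
///   main iteration-count check -> main vector loop ->
///   epilogue iteration-count check -> epilogue vector loop ->
///   scalar remainder loop -> exit,
/// with bypass edges that skip a loop whose iteration-count check fails.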
928 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
929 public:
930   InnerLoopAndEpilogueVectorizer(
931       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
932       DominatorTree *DT, const TargetLibraryInfo *TLI,
933       const TargetTransformInfo *TTI, AssumptionCache *AC,
934       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
935       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
936       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
937       GeneratedRTChecks &Checks)
938       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
939                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
940                             Checks),
941         EPI(EPI) {}
942 
943   // Override this function to handle the more complex control flow around the
944   // three loops.
945   BasicBlock *createVectorizedLoopSkeleton() final override {
946     return createEpilogueVectorizedLoopSkeleton();
947   }
948 
949   /// The interface for creating a vectorized skeleton using one of two
950   /// different strategies, each corresponding to one execution of the vplan
951   /// as described above.
952   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
953 
954   /// Holds and updates state information required to vectorize the main loop
955   /// and its epilogue in two separate passes. This setup helps us avoid
956   /// regenerating and recomputing runtime safety checks. It also helps us to
957   /// shorten the iteration-count-check path length for the cases where the
958   /// iteration count of the loop is so small that the main vector loop is
959   /// completely skipped.
960   EpilogueLoopVectorizationInfo &EPI;
961 };
962 
963 /// A specialized derived class of inner loop vectorizer that performs
964 /// vectorization of *main* loops in the process of vectorizing loops and their
965 /// epilogues.
966 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
967 public:
968   EpilogueVectorizerMainLoop(
969       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
970       DominatorTree *DT, const TargetLibraryInfo *TLI,
971       const TargetTransformInfo *TTI, AssumptionCache *AC,
972       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
973       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
974       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
975       GeneratedRTChecks &Check)
976       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
977                                        EPI, LVL, CM, BFI, PSI, Check) {}
978   /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
980   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
981 
982 protected:
983   /// Emits an iteration count bypass check once for the main loop (when \p
984   /// ForEpilogue is false) and once for the epilogue loop (when \p
985   /// ForEpilogue is true).
986   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
987                                              bool ForEpilogue);
988   void printDebugTracesAtStart() override;
989   void printDebugTracesAtEnd() override;
990 };
991 
992 // A specialized derived class of inner loop vectorizer that performs
993 // vectorization of *epilogue* loops in the process of vectorizing loops and
994 // their epilogues.
995 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
996 public:
997   EpilogueVectorizerEpilogueLoop(
998       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
999       DominatorTree *DT, const TargetLibraryInfo *TLI,
1000       const TargetTransformInfo *TTI, AssumptionCache *AC,
1001       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1002       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1003       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
1004       GeneratedRTChecks &Checks)
1005       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1006                                        EPI, LVL, CM, BFI, PSI, Checks) {}
1007   /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
1009   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1010 
1011 protected:
1012   /// Emits an iteration count bypass check after the main vector loop has
1013   /// finished to see if there are any iterations left to execute by either
1014   /// the vector epilogue or the scalar epilogue.
1015   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1016                                                       BasicBlock *Bypass,
1017                                                       BasicBlock *Insert);
1018   void printDebugTracesAtStart() override;
1019   void printDebugTracesAtEnd() override;
1020 };
1021 } // end namespace llvm
1022 
/// Look for a meaningful debug location on the instruction or its
1024 /// operands.
1025 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1026   if (!I)
1027     return I;
1028 
1029   DebugLoc Empty;
1030   if (I->getDebugLoc() != Empty)
1031     return I;
1032 
1033   for (Use &Op : I->operands()) {
1034     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1035       if (OpInst->getDebugLoc() != Empty)
1036         return OpInst;
1037   }
1038 
1039   return I;
1040 }
1041 
1042 void InnerLoopVectorizer::setDebugLocFromInst(
1043     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
1044   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
1045   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
1046     const DILocation *DIL = Inst->getDebugLoc();
1047 
    // When an FSDiscriminator is enabled, we don't need to add the multiply
1049     // factors to the discriminators.
1050     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1051         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1052       // FIXME: For scalable vectors, assume vscale=1.
1053       auto NewDIL =
1054           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1055       if (NewDIL)
1056         B->SetCurrentDebugLocation(NewDIL.getValue());
1057       else
1058         LLVM_DEBUG(dbgs()
1059                    << "Failed to create new discriminator: "
1060                    << DIL->getFilename() << " Line: " << DIL->getLine());
1061     } else
1062       B->SetCurrentDebugLocation(DIL);
1063   } else
1064     B->SetCurrentDebugLocation(DebugLoc());
1065 }
1066 
1067 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1068 /// is passed, the message relates to that particular instruction.
1069 #ifndef NDEBUG
1070 static void debugVectorizationMessage(const StringRef Prefix,
1071                                       const StringRef DebugMsg,
1072                                       Instruction *I) {
1073   dbgs() << "LV: " << Prefix << DebugMsg;
1074   if (I != nullptr)
1075     dbgs() << " " << *I;
1076   else
1077     dbgs() << '.';
1078   dbgs() << '\n';
1079 }
1080 #endif
1081 
1082 /// Create an analysis remark that explains why vectorization failed
1083 ///
1084 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1085 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1086 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1087 /// the location of the remark.  \return the remark object that can be
1088 /// streamed to.
1089 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1090     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1091   Value *CodeRegion = TheLoop->getHeader();
1092   DebugLoc DL = TheLoop->getStartLoc();
1093 
1094   if (I) {
1095     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
1097     // using the loop's.
1098     if (I->getDebugLoc())
1099       DL = I->getDebugLoc();
1100   }
1101 
1102   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1103 }
1104 
1105 /// Return a value for Step multiplied by VF.
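/// For example (illustrative), with Step = 2 and a fixed VF of 4 this returns
/// the constant 8; with a scalable VF whose known minimum is 4 it returns a
/// runtime value equal to 8 * vscale.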
1106 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1107   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1108   Constant *StepVal = ConstantInt::get(
1109       Step->getType(),
1110       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1111   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1112 }
1113 
1114 namespace llvm {
1115 
1116 /// Return the runtime value for VF.
1117 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1118   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1119   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1120 }
1121 
1122 void reportVectorizationFailure(const StringRef DebugMsg,
1123                                 const StringRef OREMsg, const StringRef ORETag,
1124                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1125                                 Instruction *I) {
1126   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1127   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1128   ORE->emit(
1129       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1130       << "loop not vectorized: " << OREMsg);
1131 }
1132 
1133 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1134                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1135                              Instruction *I) {
1136   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1137   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1138   ORE->emit(
1139       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1140       << Msg);
1141 }
1142 
1143 } // end namespace llvm
1144 
1145 #ifndef NDEBUG
1146 /// \return string containing a file name and a line # for the given loop.
1147 static std::string getDebugLocString(const Loop *L) {
1148   std::string Result;
1149   if (L) {
1150     raw_string_ostream OS(Result);
1151     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1152       LoopDbgLoc.print(OS);
1153     else
1154       // Just print the module name.
1155       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1156     OS.flush();
1157   }
1158   return Result;
1159 }
1160 #endif
1161 
1162 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1163                                          const Instruction *Orig) {
1164   // If the loop was versioned with memchecks, add the corresponding no-alias
1165   // metadata.
1166   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1167     LVer->annotateInstWithNoAlias(To, Orig);
1168 }
1169 
1170 void InnerLoopVectorizer::addMetadata(Instruction *To,
1171                                       Instruction *From) {
1172   propagateMetadata(To, From);
1173   addNewMetadata(To, From);
1174 }
1175 
1176 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1177                                       Instruction *From) {
1178   for (Value *V : To) {
1179     if (Instruction *I = dyn_cast<Instruction>(V))
1180       addMetadata(I, From);
1181   }
1182 }
1183 
1184 namespace llvm {
1185 
// Hints to the loop vectorization cost model about how the scalar epilogue
// loop should be lowered.
1188 enum ScalarEpilogueLowering {
1189 
1190   // The default: allowing scalar epilogues.
1191   CM_ScalarEpilogueAllowed,
1192 
1193   // Vectorization with OptForSize: don't allow epilogues.
1194   CM_ScalarEpilogueNotAllowedOptSize,
1195 
  // A special case of vectorization with OptForSize: loops with a very small
1197   // trip count are considered for vectorization under OptForSize, thereby
1198   // making sure the cost of their loop body is dominant, free of runtime
1199   // guards and scalar iteration overheads.
1200   CM_ScalarEpilogueNotAllowedLowTripLoop,
1201 
1202   // Loop hint predicate indicating an epilogue is undesired.
1203   CM_ScalarEpilogueNotNeededUsePredicate,
1204 
  // Directive indicating we must either fold the loop tail or not vectorize.
1206   CM_ScalarEpilogueNotAllowedUsePredicate
1207 };
1208 
1209 /// ElementCountComparator creates a total ordering for ElementCount
1210 /// for the purposes of using it in a set structure.
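/// The ordering places all fixed-width counts before all scalable counts, and
/// orders each group by the known minimum element count, e.g.
/// 2 < 4 < 8 < vscale x 2 < vscale x 4.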
1211 struct ElementCountComparator {
1212   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1213     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1214            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1215   }
1216 };
1217 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1218 
1219 /// LoopVectorizationCostModel - estimates the expected speedups due to
1220 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1223 /// expected speedup/slowdowns due to the supported instruction set. We use the
1224 /// TargetTransformInfo to query the different backends for the cost of
1225 /// different operations.
1226 class LoopVectorizationCostModel {
1227 public:
1228   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1229                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1230                              LoopVectorizationLegality *Legal,
1231                              const TargetTransformInfo &TTI,
1232                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1233                              AssumptionCache *AC,
1234                              OptimizationRemarkEmitter *ORE, const Function *F,
1235                              const LoopVectorizeHints *Hints,
1236                              InterleavedAccessInfo &IAI)
1237       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1238         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1239         Hints(Hints), InterleaveInfo(IAI) {}
1240 
1241   /// \return An upper bound for the vectorization factors (both fixed and
1242   /// scalable). If the factors are 0, vectorization and interleaving should be
1243   /// avoided up front.
1244   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1245 
1246   /// \return True if runtime checks are required for vectorization, and false
1247   /// otherwise.
1248   bool runtimeChecksRequired();
1249 
1250   /// \return The most profitable vectorization factor and the cost of that VF.
1251   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1252   /// then this vectorization factor will be selected if vectorization is
1253   /// possible.
1254   VectorizationFactor
1255   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1256 
1257   VectorizationFactor
1258   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1259                                     const LoopVectorizationPlanner &LVP);
1260 
1261   /// Setup cost-based decisions for user vectorization factor.
1262   /// \return true if the UserVF is a feasible VF to be chosen.
1263   bool selectUserVectorizationFactor(ElementCount UserVF) {
1264     collectUniformsAndScalars(UserVF);
1265     collectInstsToScalarize(UserVF);
1266     return expectedCost(UserVF).first.isValid();
1267   }
1268 
1269   /// \return The size (in bits) of the smallest and widest types in the code
1270   /// that needs to be vectorized. We ignore values that remain scalar such as
1271   /// 64 bit loop indices.
1272   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1273 
1274   /// \return The desired interleave count.
1275   /// If interleave count has been specified by metadata it will be returned.
1276   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1277   /// are the selected vectorization factor and the cost of the selected VF.
1278   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1279 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1287   void setCostBasedWideningDecision(ElementCount VF);
1288 
1289   /// A struct that represents some properties of the register usage
1290   /// of a loop.
1291   struct RegisterUsage {
1292     /// Holds the number of loop invariant values that are used in the loop.
1293     /// The key is ClassID of target-provided register class.
1294     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1295     /// Holds the maximum number of concurrent live intervals in the loop.
1296     /// The key is ClassID of target-provided register class.
1297     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1298   };
1299 
  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
1302   SmallVector<RegisterUsage, 8>
1303   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1304 
1305   /// Collect values we want to ignore in the cost model.
1306   void collectValuesToIgnore();
1307 
1308   /// Collect all element types in the loop for which widening is needed.
1309   void collectElementTypesForWidening();
1310 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1313   void collectInLoopReductions();
1314 
1315   /// Returns true if we should use strict in-order reductions for the given
1316   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1317   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1318   /// of FP operations.
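  /// For example, an in-order floating-point sum must keep the original
  /// sequential order of the fadds when vectorized (e.g. by using an ordered
  /// in-loop reduction) instead of being reassociated into a tree reduction.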
1319   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1320     return !Hints->allowReordering() && RdxDesc.isOrdered();
1321   }
1322 
1323   /// \returns The smallest bitwidth each instruction can be represented with.
1324   /// The vector equivalents of these instructions should be truncated to this
1325   /// type.
1326   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1327     return MinBWs;
1328   }
1329 
1330   /// \returns True if it is more profitable to scalarize instruction \p I for
1331   /// vectorization factor \p VF.
1332   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1333     assert(VF.isVector() &&
1334            "Profitable to scalarize relevant only for VF > 1.");
1335 
1336     // Cost model is not run in the VPlan-native path - return conservative
1337     // result until this changes.
1338     if (EnableVPlanNativePath)
1339       return false;
1340 
1341     auto Scalars = InstsToScalarize.find(VF);
1342     assert(Scalars != InstsToScalarize.end() &&
1343            "VF not yet analyzed for scalarization profitability");
1344     return Scalars->second.find(I) != Scalars->second.end();
1345   }
1346 
1347   /// Returns true if \p I is known to be uniform after vectorization.
1348   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1349     if (VF.isScalar())
1350       return true;
1351 
1352     // Cost model is not run in the VPlan-native path - return conservative
1353     // result until this changes.
1354     if (EnableVPlanNativePath)
1355       return false;
1356 
1357     auto UniformsPerVF = Uniforms.find(VF);
1358     assert(UniformsPerVF != Uniforms.end() &&
1359            "VF not yet analyzed for uniformity");
1360     return UniformsPerVF->second.count(I);
1361   }
1362 
1363   /// Returns true if \p I is known to be scalar after vectorization.
1364   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1365     if (VF.isScalar())
1366       return true;
1367 
1368     // Cost model is not run in the VPlan-native path - return conservative
1369     // result until this changes.
1370     if (EnableVPlanNativePath)
1371       return false;
1372 
1373     auto ScalarsPerVF = Scalars.find(VF);
1374     assert(ScalarsPerVF != Scalars.end() &&
1375            "Scalar values are not calculated for VF");
1376     return ScalarsPerVF->second.count(I);
1377   }
1378 
1379   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1380   /// for vectorization factor \p VF.
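  /// For example, if MinBWs records that an i32 instruction only needs 8
  /// meaningful bits, its vector form can be computed on an i8 element type
  /// (and extended again where the wider value is actually required).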
1381   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1382     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1383            !isProfitableToScalarize(I, VF) &&
1384            !isScalarAfterVectorization(I, VF);
1385   }
1386 
1387   /// Decision that was taken during cost calculation for memory instruction.
1388   enum InstWidening {
1389     CM_Unknown,
1390     CM_Widen,         // For consecutive accesses with stride +1.
1391     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1392     CM_Interleave,
1393     CM_GatherScatter,
1394     CM_Scalarize
1395   };
1396 
1397   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1398   /// instruction \p I and vector width \p VF.
1399   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1400                            InstructionCost Cost) {
1401     assert(VF.isVector() && "Expected VF >=2");
1402     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1403   }
1404 
1405   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1406   /// interleaving group \p Grp and vector width \p VF.
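  /// For example, for a group with factor 3, the decision \p W is recorded
  /// for every member, but \p Cost is attached only to the insert position;
  /// the remaining members are recorded with a cost of 0.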
1407   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1408                            ElementCount VF, InstWidening W,
1409                            InstructionCost Cost) {
1410     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1413     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1414       if (auto *I = Grp->getMember(i)) {
1415         if (Grp->getInsertPos() == I)
1416           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1417         else
1418           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1419       }
1420     }
1421   }
1422 
1423   /// Return the cost model decision for the given instruction \p I and vector
1424   /// width \p VF. Return CM_Unknown if this instruction did not pass
1425   /// through the cost modeling.
1426   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1427     assert(VF.isVector() && "Expected VF to be a vector VF");
1428     // Cost model is not run in the VPlan-native path - return conservative
1429     // result until this changes.
1430     if (EnableVPlanNativePath)
1431       return CM_GatherScatter;
1432 
1433     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1434     auto Itr = WideningDecisions.find(InstOnVF);
1435     if (Itr == WideningDecisions.end())
1436       return CM_Unknown;
1437     return Itr->second.first;
1438   }
1439 
1440   /// Return the vectorization cost for the given instruction \p I and vector
1441   /// width \p VF.
1442   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1443     assert(VF.isVector() && "Expected VF >=2");
1444     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1445     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1446            "The cost is not calculated");
1447     return WideningDecisions[InstOnVF].second;
1448   }
1449 
  /// Return true if instruction \p I is an optimizable truncate whose operand
1451   /// is an induction variable. Such a truncate will be removed by adding a new
1452   /// induction variable with the destination type.
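  /// For example, a 'trunc i64 %iv to i32' of an i64 induction variable can
  /// be replaced by a new i32 induction variable with the same start and
  /// step, provided the truncate is not already free for the target.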
1453   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1454     // If the instruction is not a truncate, return false.
1455     auto *Trunc = dyn_cast<TruncInst>(I);
1456     if (!Trunc)
1457       return false;
1458 
1459     // Get the source and destination types of the truncate.
1460     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1461     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1462 
1463     // If the truncate is free for the given types, return false. Replacing a
1464     // free truncate with an induction variable would add an induction variable
1465     // update instruction to each iteration of the loop. We exclude from this
1466     // check the primary induction variable since it will need an update
1467     // instruction regardless.
1468     Value *Op = Trunc->getOperand(0);
1469     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1470       return false;
1471 
1472     // If the truncated value is not an induction variable, return false.
1473     return Legal->isInductionPhi(Op);
1474   }
1475 
1476   /// Collects the instructions to scalarize for each predicated instruction in
1477   /// the loop.
1478   void collectInstsToScalarize(ElementCount VF);
1479 
1480   /// Collect Uniform and Scalar values for the given \p VF.
1481   /// The sets depend on CM decision for Load/Store instructions
1482   /// that may be vectorized as interleave, gather-scatter or scalarized.
1483   void collectUniformsAndScalars(ElementCount VF) {
1484     // Do the analysis once.
1485     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1486       return;
1487     setCostBasedWideningDecision(VF);
1488     collectLoopUniforms(VF);
1489     collectLoopScalars(VF);
1490   }
1491 
1492   /// Returns true if the target machine supports masked store operation
1493   /// for the given \p DataType and kind of access to \p Ptr.
1494   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1495     return Legal->isConsecutivePtr(Ptr) &&
1496            TTI.isLegalMaskedStore(DataType, Alignment);
1497   }
1498 
1499   /// Returns true if the target machine supports masked load operation
1500   /// for the given \p DataType and kind of access to \p Ptr.
1501   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1502     return Legal->isConsecutivePtr(Ptr) &&
1503            TTI.isLegalMaskedLoad(DataType, Alignment);
1504   }
1505 
1506   /// Returns true if the target machine can represent \p V as a masked gather
1507   /// or scatter operation.
1508   bool isLegalGatherOrScatter(Value *V) {
1509     bool LI = isa<LoadInst>(V);
1510     bool SI = isa<StoreInst>(V);
1511     if (!LI && !SI)
1512       return false;
1513     auto *Ty = getLoadStoreType(V);
1514     Align Align = getLoadStoreAlignment(V);
1515     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1516            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1517   }
1518 
1519   /// Returns true if the target machine supports all of the reduction
1520   /// variables found for the given VF.
1521   bool canVectorizeReductions(ElementCount VF) const {
1522     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1523       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1524       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1525     }));
1526   }
1527 
1528   /// Returns true if \p I is an instruction that will be scalarized with
1529   /// predication. Such instructions include conditional stores and
1530   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1533   bool isScalarWithPredication(Instruction *I) const;
1534 
1535   // Returns true if \p I is an instruction that will be predicated either
1536   // through scalar predication or masked load/store or masked gather/scatter.
1537   // Superset of instructions that return true for isScalarWithPredication.
1538   bool isPredicatedInst(Instruction *I) {
1539     if (!blockNeedsPredication(I->getParent()))
1540       return false;
1541     // Loads and stores that need some form of masked operation are predicated
1542     // instructions.
1543     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1544       return Legal->isMaskRequired(I);
1545     return isScalarWithPredication(I);
1546   }
1547 
1548   /// Returns true if \p I is a memory instruction with consecutive memory
1549   /// access that can be widened.
1550   bool
1551   memoryInstructionCanBeWidened(Instruction *I,
1552                                 ElementCount VF = ElementCount::getFixed(1));
1553 
1554   /// Returns true if \p I is a memory instruction in an interleaved-group
1555   /// of memory accesses that can be vectorized with wide vector loads/stores
1556   /// and shuffles.
1557   bool
1558   interleavedAccessCanBeWidened(Instruction *I,
1559                                 ElementCount VF = ElementCount::getFixed(1));
1560 
1561   /// Check if \p Instr belongs to any interleaved access group.
1562   bool isAccessInterleaved(Instruction *Instr) {
1563     return InterleaveInfo.isInterleaved(Instr);
1564   }
1565 
1566   /// Get the interleaved access group that \p Instr belongs to.
1567   const InterleaveGroup<Instruction> *
1568   getInterleavedAccessGroup(Instruction *Instr) {
1569     return InterleaveInfo.getInterleaveGroup(Instr);
1570   }
1571 
1572   /// Returns true if we're required to use a scalar epilogue for at least
1573   /// the final iteration of the original loop.
1574   bool requiresScalarEpilogue(ElementCount VF) const {
1575     if (!isScalarEpilogueAllowed())
1576       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1579     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1580       return true;
1581     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1582   }
1583 
1584   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1585   /// loop hint annotation.
1586   bool isScalarEpilogueAllowed() const {
1587     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1588   }
1589 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1591   bool foldTailByMasking() const { return FoldTailByMasking; }
1592 
1593   bool blockNeedsPredication(BasicBlock *BB) const {
1594     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1595   }
1596 
1597   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1598   /// nodes to the chain of instructions representing the reductions. Uses a
1599   /// MapVector to ensure deterministic iteration order.
1600   using ReductionChainMap =
1601       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1602 
1603   /// Return the chain of instructions representing an inloop reduction.
1604   const ReductionChainMap &getInLoopReductionChains() const {
1605     return InLoopReductionChains;
1606   }
1607 
1608   /// Returns true if the Phi is part of an inloop reduction.
1609   bool isInLoopReduction(PHINode *Phi) const {
1610     return InLoopReductionChains.count(Phi);
1611   }
1612 
1613   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1614   /// with factor VF.  Return the cost of the instruction, including
1615   /// scalarization overhead if it's needed.
1616   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1617 
1618   /// Estimate cost of a call instruction CI if it were vectorized with factor
1619   /// VF. Return the cost of the instruction, including scalarization overhead
1620   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1623   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1624                                     bool &NeedToScalarize) const;
1625 
1626   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1627   /// that of B.
1628   bool isMoreProfitable(const VectorizationFactor &A,
1629                         const VectorizationFactor &B) const;
1630 
1631   /// Invalidates decisions already taken by the cost model.
1632   void invalidateCostModelingDecisions() {
1633     WideningDecisions.clear();
1634     Uniforms.clear();
1635     Scalars.clear();
1636   }
1637 
1638 private:
1639   unsigned NumPredStores = 0;
1640 
1641   /// \return An upper bound for the vectorization factors for both
1642   /// fixed and scalable vectorization, where the minimum-known number of
1643   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1644   /// disabled or unsupported, then the scalable part will be equal to
1645   /// ElementCount::getScalable(0).
1646   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1647                                            ElementCount UserVF);
1648 
  /// \return the maximized element count based on the target's vector
1650   /// registers and the loop trip-count, but limited to a maximum safe VF.
1651   /// This is a helper function of computeFeasibleMaxVF.
1652   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1653   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1655   /// D98509). The issue is currently under investigation and this workaround
1656   /// will be removed as soon as possible.
1657   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1658                                        unsigned SmallestType,
1659                                        unsigned WidestType,
1660                                        const ElementCount &MaxSafeVF);
1661 
1662   /// \return the maximum legal scalable VF, based on the safe max number
1663   /// of elements.
1664   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1665 
1666   /// The vectorization cost is a combination of the cost itself and a boolean
1667   /// indicating whether any of the contributing operations will actually
1668   /// operate on vector values after type legalization in the backend. If this
1669   /// latter value is false, then all operations will be scalarized (i.e. no
1670   /// vectorization has actually taken place).
1671   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1672 
1673   /// Returns the expected execution cost. The unit of the cost does
1674   /// not matter because we use the 'cost' units to compare different
1675   /// vector widths. The cost that is returned is *not* normalized by
1676   /// the factor width. If \p Invalid is not nullptr, this function
1677   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1678   /// each instruction that has an Invalid cost for the given VF.
1679   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1680   VectorizationCostTy
1681   expectedCost(ElementCount VF,
1682                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1683 
1684   /// Returns the execution time cost of an instruction for a given vector
1685   /// width. Vector width of one means scalar.
1686   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1687 
1688   /// The cost-computation logic from getInstructionCost which provides
1689   /// the vector type as an output parameter.
1690   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1691                                      Type *&VectorTy);
1692 
1693   /// Return the cost of instructions in an inloop reduction pattern, if I is
1694   /// part of that pattern.
1695   Optional<InstructionCost>
1696   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1697                           TTI::TargetCostKind CostKind);
1698 
1699   /// Calculate vectorization cost of memory instruction \p I.
1700   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1701 
1702   /// The cost computation for scalarized memory instruction.
1703   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1704 
1705   /// The cost computation for interleaving group of memory instructions.
1706   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1707 
1708   /// The cost computation for Gather/Scatter instruction.
1709   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1710 
1711   /// The cost computation for widening instruction \p I with consecutive
1712   /// memory access.
1713   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1714 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
1719   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1720 
1721   /// Estimate the overhead of scalarizing an instruction. This is a
1722   /// convenience wrapper for the type-based getScalarizationOverhead API.
1723   InstructionCost getScalarizationOverhead(Instruction *I,
1724                                            ElementCount VF) const;
1725 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1728   bool isConsecutiveLoadOrStore(Instruction *I);
1729 
1730   /// Returns true if an artificially high cost for emulated masked memrefs
1731   /// should be used.
1732   bool useEmulatedMaskMemRefHack(Instruction *I);
1733 
1734   /// Map of scalar integer values to the smallest bitwidth they can be legally
1735   /// represented as. The vector equivalents of these values should be truncated
1736   /// to this type.
1737   MapVector<Instruction *, uint64_t> MinBWs;
1738 
1739   /// A type representing the costs for instructions if they were to be
1740   /// scalarized rather than vectorized. The entries are Instruction-Cost
1741   /// pairs.
1742   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1743 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1746   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1747 
1748   /// Records whether it is allowed to have the original scalar loop execute at
1749   /// least once. This may be needed as a fallback loop in case runtime
1750   /// aliasing/dependence checks fail, or to handle the tail/remainder
1751   /// iterations when the trip count is unknown or doesn't divide by the VF,
1752   /// or as a peel-loop to handle gaps in interleave-groups.
1753   /// Under optsize and when the trip count is very small we don't allow any
1754   /// iterations to execute in the scalar loop.
1755   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1756 
1757   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1758   bool FoldTailByMasking = false;
1759 
1760   /// A map holding scalar costs for different vectorization factors. The
1761   /// presence of a cost for an instruction in the mapping indicates that the
1762   /// instruction will be scalarized when vectorizing with the associated
1763   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1764   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1765 
1766   /// Holds the instructions known to be uniform after vectorization.
1767   /// The data is collected per VF.
1768   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1769 
1770   /// Holds the instructions known to be scalar after vectorization.
1771   /// The data is collected per VF.
1772   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1773 
1774   /// Holds the instructions (address computations) that are forced to be
1775   /// scalarized.
1776   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1777 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1781   ReductionChainMap InLoopReductionChains;
1782 
1783   /// A Map of inloop reduction operations and their immediate chain operand.
1784   /// FIXME: This can be removed once reductions can be costed correctly in
1785   /// vplan. This was added to allow quick lookup to the inloop operations,
1786   /// without having to loop through InLoopReductionChains.
1787   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1788 
1789   /// Returns the expected difference in cost from scalarizing the expression
1790   /// feeding a predicated instruction \p PredInst. The instructions to
1791   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1792   /// non-negative return value implies the expression will be scalarized.
1793   /// Currently, only single-use chains are considered for scalarization.
1794   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1795                               ElementCount VF);
1796 
1797   /// Collect the instructions that are uniform after vectorization. An
1798   /// instruction is uniform if we represent it with a single scalar value in
1799   /// the vectorized loop corresponding to each vector iteration. Examples of
1800   /// uniform instructions include pointer operands of consecutive or
1801   /// interleaved memory accesses. Note that although uniformity implies an
1802   /// instruction will be scalar, the reverse is not true. In general, a
1803   /// scalarized instruction will be represented by VF scalar values in the
1804   /// vectorized loop, each corresponding to an iteration of the original
1805   /// scalar loop.
1806   void collectLoopUniforms(ElementCount VF);
1807 
1808   /// Collect the instructions that are scalar after vectorization. An
1809   /// instruction is scalar if it is known to be uniform or will be scalarized
1810   /// during vectorization. Non-uniform scalarized instructions will be
1811   /// represented by VF values in the vectorized loop, each corresponding to an
1812   /// iteration of the original scalar loop.
1813   void collectLoopScalars(ElementCount VF);
1814 
1815   /// Keeps cost model vectorization decision and cost for instructions.
1816   /// Right now it is used for memory instructions only.
1817   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1818                                 std::pair<InstWidening, InstructionCost>>;
1819 
1820   DecisionList WideningDecisions;
1821 
1822   /// Returns true if \p V is expected to be vectorized and it needs to be
1823   /// extracted.
1824   bool needsExtract(Value *V, ElementCount VF) const {
1825     Instruction *I = dyn_cast<Instruction>(V);
1826     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1827         TheLoop->isLoopInvariant(I))
1828       return false;
1829 
1830     // Assume we can vectorize V (and hence we need extraction) if the
1831     // scalars are not computed yet. This can happen, because it is called
1832     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1833     // the scalars are collected. That should be a safe assumption in most
1834     // cases, because we check if the operands have vectorizable types
1835     // beforehand in LoopVectorizationLegality.
1836     return Scalars.find(VF) == Scalars.end() ||
1837            !isScalarAfterVectorization(I, VF);
1838   };
1839 
1840   /// Returns a range containing only operands needing to be extracted.
1841   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1842                                                    ElementCount VF) const {
1843     return SmallVector<Value *, 4>(make_filter_range(
1844         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1845   }
1846 
1847   /// Determines if we have the infrastructure to vectorize loop \p L and its
1848   /// epilogue, assuming the main loop is vectorized by \p VF.
1849   bool isCandidateForEpilogueVectorization(const Loop &L,
1850                                            const ElementCount VF) const;
1851 
1852   /// Returns true if epilogue vectorization is considered profitable, and
1853   /// false otherwise.
1854   /// \p VF is the vectorization factor chosen for the original loop.
1855   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1856 
1857 public:
1858   /// The loop that we evaluate.
1859   Loop *TheLoop;
1860 
1861   /// Predicated scalar evolution analysis.
1862   PredicatedScalarEvolution &PSE;
1863 
1864   /// Loop Info analysis.
1865   LoopInfo *LI;
1866 
1867   /// Vectorization legality.
1868   LoopVectorizationLegality *Legal;
1869 
1870   /// Vector target information.
1871   const TargetTransformInfo &TTI;
1872 
1873   /// Target Library Info.
1874   const TargetLibraryInfo *TLI;
1875 
1876   /// Demanded bits analysis.
1877   DemandedBits *DB;
1878 
1879   /// Assumption cache.
1880   AssumptionCache *AC;
1881 
1882   /// Interface to emit optimization remarks.
1883   OptimizationRemarkEmitter *ORE;
1884 
1885   const Function *TheFunction;
1886 
1887   /// Loop Vectorize Hint.
1888   const LoopVectorizeHints *Hints;
1889 
1890   /// The interleave access information contains groups of interleaved accesses
1891   /// with the same stride and close to each other.
1892   InterleavedAccessInfo &InterleaveInfo;
1893 
1894   /// Values to ignore in the cost model.
1895   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1896 
1897   /// Values to ignore in the cost model when VF > 1.
1898   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1899 
1900   /// All element types found in the loop.
1901   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1902 
1903   /// Profitable vector factors.
1904   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1905 };
1906 } // end namespace llvm
1907 
1908 /// Helper struct to manage generating runtime checks for vectorization.
1909 ///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimation of their cost, and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If the decision is not to
/// vectorize, the temporary blocks are completely removed.
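///
/// The intended flow (roughly) is: Create() generates the checks once the
/// candidate loop is known, the cost model can then inspect the generated
/// blocks, and afterwards either emitSCEVChecks()/emitMemRuntimeChecks() hook
/// the blocks into the vectorized CFG, or the destructor deletes the unused
/// blocks again.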
1914 class GeneratedRTChecks {
1915   /// Basic block which contains the generated SCEV checks, if any.
1916   BasicBlock *SCEVCheckBlock = nullptr;
1917 
1918   /// The value representing the result of the generated SCEV checks. If it is
1919   /// nullptr, either no SCEV checks have been generated or they have been used.
1920   Value *SCEVCheckCond = nullptr;
1921 
1922   /// Basic block which contains the generated memory runtime checks, if any.
1923   BasicBlock *MemCheckBlock = nullptr;
1924 
1925   /// The value representing the result of the generated memory runtime checks.
1926   /// If it is nullptr, either no memory runtime checks have been generated or
1927   /// they have been used.
1928   Instruction *MemRuntimeCheckCond = nullptr;
1929 
1930   DominatorTree *DT;
1931   LoopInfo *LI;
1932 
1933   SCEVExpander SCEVExp;
1934   SCEVExpander MemCheckExp;
1935 
1936 public:
1937   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1938                     const DataLayout &DL)
1939       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1940         MemCheckExp(SE, DL, "scev.check") {}
1941 
1942   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1943   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
1947   void Create(Loop *L, const LoopAccessInfo &LAI,
1948               const SCEVUnionPredicate &UnionPred) {
1949 
1950     BasicBlock *LoopHeader = L->getHeader();
1951     BasicBlock *Preheader = L->getLoopPreheader();
1952 
1953     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1954     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1955     // may be used by SCEVExpander. The blocks will be un-linked from their
1956     // predecessors and removed from LI & DT at the end of the function.
1957     if (!UnionPred.isAlwaysTrue()) {
1958       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1959                                   nullptr, "vector.scevcheck");
1960 
1961       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1962           &UnionPred, SCEVCheckBlock->getTerminator());
1963     }
1964 
1965     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1966     if (RtPtrChecking.Need) {
1967       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1968       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1969                                  "vector.memcheck");
1970 
1971       std::tie(std::ignore, MemRuntimeCheckCond) =
1972           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1973                            RtPtrChecking.getChecks(), MemCheckExp);
1974       assert(MemRuntimeCheckCond &&
1975              "no RT checks generated although RtPtrChecking "
1976              "claimed checks are required");
1977     }
1978 
1979     if (!MemCheckBlock && !SCEVCheckBlock)
1980       return;
1981 
    // Unhook the temporary blocks with the checks, and update various places
    // accordingly.
1984     if (SCEVCheckBlock)
1985       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1986     if (MemCheckBlock)
1987       MemCheckBlock->replaceAllUsesWith(Preheader);
1988 
1989     if (SCEVCheckBlock) {
1990       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1991       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1992       Preheader->getTerminator()->eraseFromParent();
1993     }
1994     if (MemCheckBlock) {
1995       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1996       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1997       Preheader->getTerminator()->eraseFromParent();
1998     }
1999 
2000     DT->changeImmediateDominator(LoopHeader, Preheader);
2001     if (MemCheckBlock) {
2002       DT->eraseNode(MemCheckBlock);
2003       LI->removeBlock(MemCheckBlock);
2004     }
2005     if (SCEVCheckBlock) {
2006       DT->eraseNode(SCEVCheckBlock);
2007       LI->removeBlock(SCEVCheckBlock);
2008     }
2009   }
2010 
2011   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2012   /// unused.
2013   ~GeneratedRTChecks() {
2014     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2015     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2016     if (!SCEVCheckCond)
2017       SCEVCleaner.markResultUsed();
2018 
2019     if (!MemRuntimeCheckCond)
2020       MemCheckCleaner.markResultUsed();
2021 
2022     if (MemRuntimeCheckCond) {
2023       auto &SE = *MemCheckExp.getSE();
2024       // Memory runtime check generation creates compares that use expanded
2025       // values. Remove them before running the SCEVExpanderCleaners.
2026       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2027         if (MemCheckExp.isInsertedInstruction(&I))
2028           continue;
2029         SE.forgetValue(&I);
2030         SE.eraseValueFromMap(&I);
2031         I.eraseFromParent();
2032       }
2033     }
2034     MemCheckCleaner.cleanup();
2035     SCEVCleaner.cleanup();
2036 
2037     if (SCEVCheckCond)
2038       SCEVCheckBlock->eraseFromParent();
2039     if (MemRuntimeCheckCond)
2040       MemCheckBlock->eraseFromParent();
2041   }
2042 
2043   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2044   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2045   /// depending on the generated condition.
2046   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2047                              BasicBlock *LoopVectorPreHeader,
2048                              BasicBlock *LoopExitBlock) {
2049     if (!SCEVCheckCond)
2050       return nullptr;
2051     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2052       if (C->isZero())
2053         return nullptr;
2054 
2055     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2056 
2057     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // If the vector loop is nested inside an outer loop, also add
    // SCEVCheckBlock to that enclosing loop.
2059     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2060       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2061 
2062     SCEVCheckBlock->getTerminator()->eraseFromParent();
2063     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2064     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2065                                                 SCEVCheckBlock);
2066 
2067     DT->addNewBlock(SCEVCheckBlock, Pred);
2068     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2069 
2070     ReplaceInstWithInst(
2071         SCEVCheckBlock->getTerminator(),
2072         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2073     // Mark the check as used, to prevent it from being removed during cleanup.
2074     SCEVCheckCond = nullptr;
2075     return SCEVCheckBlock;
2076   }
2077 
2078   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2079   /// the branches to branch to the vector preheader or \p Bypass, depending on
2080   /// the generated condition.
2081   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2082                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime whether the arrays
    // overlap.
2084     if (!MemRuntimeCheckCond)
2085       return nullptr;
2086 
2087     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2088     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2089                                                 MemCheckBlock);
2090 
2091     DT->addNewBlock(MemCheckBlock, Pred);
2092     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2093     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2094 
2095     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2096       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2097 
2098     ReplaceInstWithInst(
2099         MemCheckBlock->getTerminator(),
2100         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2101     MemCheckBlock->getTerminator()->setDebugLoc(
2102         Pred->getTerminator()->getDebugLoc());
2103 
2104     // Mark the check as used, to prevent it from being removed during cleanup.
2105     MemRuntimeCheckCond = nullptr;
2106     return MemCheckBlock;
2107   }
2108 };
2109 
2110 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2111 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2117 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2118 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2119 // provides *explicit vectorization hints* (LV can bypass legal checks and
2120 // assume that vectorization is legal). However, both hints are implemented
2121 // using the same metadata (llvm.loop.vectorize, processed by
2122 // LoopVectorizeHints). This will be fixed in the future when the native IR
2123 // representation for pragma 'omp simd' is introduced.
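//
// For illustration, an outer loop annotated roughly as follows would be
// considered for explicit vectorization on the VPlan-native path:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)     // outer loop handled by LV
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];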
2124 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2125                                    OptimizationRemarkEmitter *ORE) {
2126   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2127   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2128 
2129   // Only outer loops with an explicit vectorization hint are supported.
2130   // Unannotated outer loops are ignored.
2131   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2132     return false;
2133 
2134   Function *Fn = OuterLp->getHeader()->getParent();
2135   if (!Hints.allowVectorization(Fn, OuterLp,
2136                                 true /*VectorizeOnlyWhenForced*/)) {
2137     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2138     return false;
2139   }
2140 
2141   if (Hints.getInterleave() > 1) {
2142     // TODO: Interleave support is future work.
2143     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2144                          "outer loops.\n");
2145     Hints.emitRemarkWithHints();
2146     return false;
2147   }
2148 
2149   return true;
2150 }
2151 
2152 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2153                                   OptimizationRemarkEmitter *ORE,
2154                                   SmallVectorImpl<Loop *> &V) {
2155   // Collect inner loops and outer loops without irreducible control flow. For
2156   // now, only collect outer loops that have explicit vectorization hints. If we
2157   // are stress testing the VPlan H-CFG construction, we collect the outermost
2158   // loop of every loop nest.
2159   if (L.isInnermost() || VPlanBuildStressTest ||
2160       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2161     LoopBlocksRPO RPOT(&L);
2162     RPOT.perform(LI);
2163     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2164       V.push_back(&L);
2165       // TODO: Collect inner loops inside marked outer loops in case
2166       // vectorization fails for the outer loop. Do not invoke
2167       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2168       // already known to be reducible. We can use an inherited attribute for
2169       // that.
2170       return;
2171     }
2172   }
2173   for (Loop *InnerL : L)
2174     collectSupportedLoops(*InnerL, LI, ORE, V);
2175 }
2176 
2177 namespace {
2178 
2179 /// The LoopVectorize Pass.
2180 struct LoopVectorize : public FunctionPass {
2181   /// Pass identification, replacement for typeid
2182   static char ID;
2183 
2184   LoopVectorizePass Impl;
2185 
2186   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2187                          bool VectorizeOnlyWhenForced = false)
2188       : FunctionPass(ID),
2189         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2190     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2191   }
2192 
2193   bool runOnFunction(Function &F) override {
2194     if (skipFunction(F))
2195       return false;
2196 
2197     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2198     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2199     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2200     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2201     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2202     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2203     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2204     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2205     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2206     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2207     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2208     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2209     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2210 
2211     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2212         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2213 
2214     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2215                         GetLAA, *ORE, PSI).MadeAnyChange;
2216   }
2217 
2218   void getAnalysisUsage(AnalysisUsage &AU) const override {
2219     AU.addRequired<AssumptionCacheTracker>();
2220     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2221     AU.addRequired<DominatorTreeWrapperPass>();
2222     AU.addRequired<LoopInfoWrapperPass>();
2223     AU.addRequired<ScalarEvolutionWrapperPass>();
2224     AU.addRequired<TargetTransformInfoWrapperPass>();
2225     AU.addRequired<AAResultsWrapperPass>();
2226     AU.addRequired<LoopAccessLegacyAnalysis>();
2227     AU.addRequired<DemandedBitsWrapperPass>();
2228     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2229     AU.addRequired<InjectTLIMappingsLegacy>();
2230 
    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
2234     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2235     if (!EnableVPlanNativePath) {
2236       AU.addPreserved<LoopInfoWrapperPass>();
2237       AU.addPreserved<DominatorTreeWrapperPass>();
2238     }
2239 
2240     AU.addPreserved<BasicAAWrapperPass>();
2241     AU.addPreserved<GlobalsAAWrapperPass>();
2242     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2243   }
2244 };
2245 
2246 } // end anonymous namespace
2247 
2248 //===----------------------------------------------------------------------===//
2249 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2250 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2251 //===----------------------------------------------------------------------===//
2252 
2253 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2254   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2257   Instruction *Instr = dyn_cast<Instruction>(V);
2258   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2259                      (!Instr ||
2260                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2261   // Place the code for broadcasting invariant variables in the new preheader.
2262   IRBuilder<>::InsertPointGuard Guard(Builder);
2263   if (SafeToHoist)
2264     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2265 
2266   // Broadcast the scalar into all locations in the vector.
2267   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2268 
2269   return Shuf;
2270 }
2271 
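// Note (illustration): for an integer induction with start 0, step 1, a fixed
// VF of 4 and UF == 1, the code below creates a vector phi that starts at
// <0, 1, 2, 3> and a 'step.add' update that adds <4, 4, 4, 4> on every vector
// iteration; with UF > 1 one step.add is created per unrolled part and the
// last one feeds back into the phi.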
2272 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2273     const InductionDescriptor &II, Value *Step, Value *Start,
2274     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2275     VPTransformState &State) {
2276   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2277          "Expected either an induction phi-node or a truncate of it!");
2278 
2279   // Construct the initial value of the vector IV in the vector loop preheader
2280   auto CurrIP = Builder.saveIP();
2281   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2282   if (isa<TruncInst>(EntryVal)) {
2283     assert(Start->getType()->isIntegerTy() &&
2284            "Truncation requires an integer type");
2285     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2286     Step = Builder.CreateTrunc(Step, TruncType);
2287     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2288   }
2289   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2290   Value *SteppedStart =
2291       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2292 
2293   // We create vector phi nodes for both integer and floating-point induction
2294   // variables. Here, we determine the kind of arithmetic we will perform.
2295   Instruction::BinaryOps AddOp;
2296   Instruction::BinaryOps MulOp;
2297   if (Step->getType()->isIntegerTy()) {
2298     AddOp = Instruction::Add;
2299     MulOp = Instruction::Mul;
2300   } else {
2301     AddOp = II.getInductionOpcode();
2302     MulOp = Instruction::FMul;
2303   }
2304 
2305   // Multiply the vectorization factor by the step using integer or
2306   // floating-point arithmetic as appropriate.
2307   Type *StepType = Step->getType();
2308   if (Step->getType()->isFloatingPointTy())
2309     StepType = IntegerType::get(StepType->getContext(),
2310                                 StepType->getScalarSizeInBits());
2311   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2312   if (Step->getType()->isFloatingPointTy())
2313     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2314   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2315 
2316   // Create a vector splat to use in the induction update.
2317   //
2318   // FIXME: If the step is non-constant, we create the vector splat with
2319   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2320   //        handle a constant vector splat.
2321   Value *SplatVF = isa<Constant>(Mul)
2322                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2323                        : Builder.CreateVectorSplat(VF, Mul);
2324   Builder.restoreIP(CurrIP);
2325 
2326   // We may need to add the step a number of times, depending on the unroll
2327   // factor. The last of those goes into the PHI.
2328   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2329                                     &*LoopVectorBody->getFirstInsertionPt());
2330   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2331   Instruction *LastInduction = VecInd;
2332   for (unsigned Part = 0; Part < UF; ++Part) {
2333     State.set(Def, LastInduction, Part);
2334 
2335     if (isa<TruncInst>(EntryVal))
2336       addMetadata(LastInduction, EntryVal);
2337     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2338                                           State, Part);
2339 
2340     LastInduction = cast<Instruction>(
2341         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2342     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2343   }
2344 
2345   // Move the last step to the end of the latch block. This ensures consistent
2346   // placement of all induction updates.
2347   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2348   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2349   auto *ICmp = cast<Instruction>(Br->getCondition());
2350   LastInduction->moveBefore(ICmp);
2351   LastInduction->setName("vec.ind.next");
2352 
2353   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2354   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2355 }
2356 
2357 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2358   return Cost->isScalarAfterVectorization(I, VF) ||
2359          Cost->isProfitableToScalarize(I, VF);
2360 }
2361 
2362 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2363   if (shouldScalarizeInstruction(IV))
2364     return true;
2365   auto isScalarInst = [&](User *U) -> bool {
2366     auto *I = cast<Instruction>(U);
2367     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2368   };
2369   return llvm::any_of(IV->users(), isScalarInst);
2370 }
2371 
2372 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2373     const InductionDescriptor &ID, const Instruction *EntryVal,
2374     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2375     unsigned Part, unsigned Lane) {
2376   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2377          "Expected either an induction phi-node or a truncate of it!");
2378 
2379   // This induction variable is not the phi from the original loop but the
2380   // newly-created IV based on the proof that the casted phi is equal to the
2381   // uncasted phi in the vectorized loop (possibly under a runtime guard). It
2382   // re-uses the same InductionDescriptor as the original IV, but no recording
2383   // is needed in this case - that is done when the original IV itself is
2384   // processed.
2385   if (isa<TruncInst>(EntryVal))
2386     return;
2387 
2388   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2389   if (Casts.empty())
2390     return;
2391   // Only the first Cast instruction in the Casts vector is of interest.
2392   // The rest of the Casts (if any exist) have no uses outside the
2393   // induction update chain itself.
2394   if (Lane < UINT_MAX)
2395     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2396   else
2397     State.set(CastDef, VectorLoopVal, Part);
2398 }
2399 
2400 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2401                                                 TruncInst *Trunc, VPValue *Def,
2402                                                 VPValue *CastDef,
2403                                                 VPTransformState &State) {
2404   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2405          "Primary induction variable must have an integer type");
2406 
2407   auto II = Legal->getInductionVars().find(IV);
2408   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2409 
2410   auto ID = II->second;
2411   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2412 
2413   // The value from the original loop to which we are mapping the new induction
2414   // variable.
2415   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2416 
2417   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2418 
2419   // Generate code for the induction step. Note that induction steps are
2420   // required to be loop-invariant.
2421   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2422     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2423            "Induction step should be loop invariant");
2424     if (PSE.getSE()->isSCEVable(IV->getType())) {
2425       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2426       return Exp.expandCodeFor(Step, Step->getType(),
2427                                LoopVectorPreHeader->getTerminator());
2428     }
2429     return cast<SCEVUnknown>(Step)->getValue();
2430   };
2431 
2432   // The scalar value to broadcast. This is derived from the canonical
2433   // induction variable. If a truncation type is given, truncate the canonical
2434   // induction variable and step. Otherwise, derive these values from the
2435   // induction descriptor.
2436   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2437     Value *ScalarIV = Induction;
2438     if (IV != OldInduction) {
2439       ScalarIV = IV->getType()->isIntegerTy()
2440                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2441                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2442                                           IV->getType());
2443       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2444       ScalarIV->setName("offset.idx");
2445     }
2446     if (Trunc) {
2447       auto *TruncType = cast<IntegerType>(Trunc->getType());
2448       assert(Step->getType()->isIntegerTy() &&
2449              "Truncation requires an integer step");
2450       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2451       Step = Builder.CreateTrunc(Step, TruncType);
2452     }
2453     return ScalarIV;
2454   };
2455 
2456   // Create the vector values from the scalar IV, in the absence of creating a
2457   // vector IV.
2458   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2459     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2460     for (unsigned Part = 0; Part < UF; ++Part) {
2461       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2462       Value *EntryPart =
2463           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2464                         ID.getInductionOpcode());
2465       State.set(Def, EntryPart, Part);
2466       if (Trunc)
2467         addMetadata(EntryPart, Trunc);
2468       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2469                                             State, Part);
2470     }
2471   };
2472 
2473   // Fast-math-flags propagate from the original induction instruction.
2474   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2475   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2476     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2477 
2478   // Now do the actual transformations, and start with creating the step value.
2479   Value *Step = CreateStepValue(ID.getStep());
2480   if (VF.isZero() || VF.isScalar()) {
2481     Value *ScalarIV = CreateScalarIV(Step);
2482     CreateSplatIV(ScalarIV, Step);
2483     return;
2484   }
2485 
2486   // Determine if we want a scalar version of the induction variable. This is
2487   // true if the induction variable itself is not widened, or if it has at
2488   // least one user in the loop that is not widened.
2489   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2490   if (!NeedsScalarIV) {
2491     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2492                                     State);
2493     return;
2494   }
2495 
2496   // Try to create a new independent vector induction variable. If we can't
2497   // create the phi node, we will splat the scalar induction variable in each
2498   // loop iteration.
2499   if (!shouldScalarizeInstruction(EntryVal)) {
2500     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2501                                     State);
2502     Value *ScalarIV = CreateScalarIV(Step);
2503     // Create scalar steps that can be used by instructions we will later
2504     // scalarize. Note that the addition of the scalar steps will not increase
2505     // the number of instructions in the loop in the common case prior to
2506     // InstCombine. We will be trading one vector extract for each scalar step.
2507     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2508     return;
2509   }
2510 
2511   // All IV users are scalar instructions, so only emit a scalar IV, not a
2512   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
2513   // predicate used by the masked loads/stores.
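  // (When folding the tail, the block mask is typically formed by comparing
  // this splat IV, offset per lane, against a splat of the backedge-taken
  // count; see the mask-generation logic elsewhere in this file.)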
2514   Value *ScalarIV = CreateScalarIV(Step);
2515   if (!Cost->isScalarEpilogueAllowed())
2516     CreateSplatIV(ScalarIV, Step);
2517   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2518 }
2519 
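// As an illustrative example, getStepVector(Val, /*StartIdx=*/2, /*Step=*/3,
// Instruction::Add) with a fixed VF of 4 and i32 elements first builds the
// step vector <0,1,2,3>, adds the start index to get <2,3,4,5>, multiplies by
// the step to get <6,9,12,15>, and finally returns Val + <6,9,12,15>.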
2520 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2521                                           Instruction::BinaryOps BinOp) {
2522   // Create and check the types.
2523   auto *ValVTy = cast<VectorType>(Val->getType());
2524   ElementCount VLen = ValVTy->getElementCount();
2525 
2526   Type *STy = Val->getType()->getScalarType();
2527   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2528          "Induction Step must be an integer or FP");
2529   assert(Step->getType() == STy && "Step has wrong type");
2530 
2531   SmallVector<Constant *, 8> Indices;
2532 
2533   // Create a vector of consecutive numbers from zero to VF.
2534   VectorType *InitVecValVTy = ValVTy;
2535   Type *InitVecValSTy = STy;
2536   if (STy->isFloatingPointTy()) {
2537     InitVecValSTy =
2538         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2539     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2540   }
2541   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2542 
2543   // Add on StartIdx
2544   Value *StartIdxSplat = Builder.CreateVectorSplat(
2545       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2546   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2547 
2548   if (STy->isIntegerTy()) {
2549     Step = Builder.CreateVectorSplat(VLen, Step);
2550     assert(Step->getType() == Val->getType() && "Invalid step vec");
2551     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2552     // which can be found from the original scalar operations.
2553     Step = Builder.CreateMul(InitVec, Step);
2554     return Builder.CreateAdd(Val, Step, "induction");
2555   }
2556 
2557   // Floating point induction.
2558   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2559          "Binary Opcode should be specified for FP induction");
2560   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2561   Step = Builder.CreateVectorSplat(VLen, Step);
2562   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2563   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2564 }
2565 
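// As a sketch of the generated values: for a non-uniform EntryVal, lane L of
// unroll part P receives ScalarIV + (P * VF + L) * Step, so with VF = 4,
// UF = 2 and Step = 2 the scalar steps are ScalarIV + {0, 2, 4, 6} for part 0
// and ScalarIV + {8, 10, 12, 14} for part 1.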
2566 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2567                                            Instruction *EntryVal,
2568                                            const InductionDescriptor &ID,
2569                                            VPValue *Def, VPValue *CastDef,
2570                                            VPTransformState &State) {
2571   // We shouldn't have to build scalar steps if we aren't vectorizing.
2572   assert(VF.isVector() && "VF should be greater than one");
2573   // Get the value type and ensure it and the step have the same type.
2574   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2575   assert(ScalarIVTy == Step->getType() &&
2576          "Val and Step should have the same type");
2577 
2578   // We build scalar steps for both integer and floating-point induction
2579   // variables. Here, we determine the kind of arithmetic we will perform.
2580   Instruction::BinaryOps AddOp;
2581   Instruction::BinaryOps MulOp;
2582   if (ScalarIVTy->isIntegerTy()) {
2583     AddOp = Instruction::Add;
2584     MulOp = Instruction::Mul;
2585   } else {
2586     AddOp = ID.getInductionOpcode();
2587     MulOp = Instruction::FMul;
2588   }
2589 
2590   // Determine the number of scalars we need to generate for each unroll
2591   // iteration. If EntryVal is uniform, we only need to generate the first
2592   // lane. Otherwise, we generate all VF values.
2593   bool IsUniform =
2594       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2595   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2596   // Compute the scalar steps and save the results in State.
2597   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2598                                      ScalarIVTy->getScalarSizeInBits());
2599   Type *VecIVTy = nullptr;
2600   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2601   if (!IsUniform && VF.isScalable()) {
2602     VecIVTy = VectorType::get(ScalarIVTy, VF);
2603     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2604     SplatStep = Builder.CreateVectorSplat(VF, Step);
2605     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2606   }
2607 
2608   for (unsigned Part = 0; Part < UF; ++Part) {
2609     Value *StartIdx0 =
2610         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2611 
2612     if (!IsUniform && VF.isScalable()) {
2613       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2614       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2615       if (ScalarIVTy->isFloatingPointTy())
2616         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2617       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2618       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2619       State.set(Def, Add, Part);
2620       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2621                                             Part);
2622       // It's useful to record the lane values too for the known minimum number
2623       // of elements so we do those below. This improves the code quality when
2624       // trying to extract the first element, for example.
2625     }
2626 
2627     if (ScalarIVTy->isFloatingPointTy())
2628       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2629 
2630     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2631       Value *StartIdx = Builder.CreateBinOp(
2632           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2633       // The step returned by `createStepForVF` is a runtime-evaluated value
2634       // when VF is scalable. Otherwise, it should be folded into a Constant.
2635       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2636              "Expected StartIdx to be folded to a constant when VF is not "
2637              "scalable");
2638       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2639       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2640       State.set(Def, Add, VPIteration(Part, Lane));
2641       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2642                                             Part, Lane);
2643     }
2644   }
2645 }
2646 
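// Insert a scalar value computed for a single lane into its position in the
// corresponding vector value of the same unroll part, e.g. placing the value
// computed for lane 2 of part 1 at element index 2 of that part's vector.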
2647 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2648                                                     const VPIteration &Instance,
2649                                                     VPTransformState &State) {
2650   Value *ScalarInst = State.get(Def, Instance);
2651   Value *VectorValue = State.get(Def, Instance.Part);
2652   VectorValue = Builder.CreateInsertElement(
2653       VectorValue, ScalarInst,
2654       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2655   State.set(Def, VectorValue, Instance.Part);
2656 }
2657 
2658 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2659   assert(Vec->getType()->isVectorTy() && "Invalid type");
2660   return Builder.CreateVectorReverse(Vec, "reverse");
2661 }
2662 
2663 // Return whether we allow using masked interleave-groups (for dealing with
2664 // strided loads/stores that reside in predicated blocks, or for dealing
2665 // with gaps).
2666 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2667   // If an override option has been passed in for interleaved accesses, use it.
2668   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2669     return EnableMaskedInterleavedMemAccesses;
2670 
2671   return TTI.enableMaskedInterleavedAccessVectorization();
2672 }
2673 
2674 // Try to vectorize the interleave group that \p Instr belongs to.
2675 //
2676 // E.g. Translate following interleaved load group (factor = 3):
2677 //   for (i = 0; i < N; i+=3) {
2678 //     R = Pic[i];             // Member of index 0
2679 //     G = Pic[i+1];           // Member of index 1
2680 //     B = Pic[i+2];           // Member of index 2
2681 //     ... // do something to R, G, B
2682 //   }
2683 // To:
2684 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2685 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2686 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2687 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2688 //
2689 // Or translate following interleaved store group (factor = 3):
2690 //   for (i = 0; i < N; i+=3) {
2691 //     ... do something to R, G, B
2692 //     Pic[i]   = R;           // Member of index 0
2693 //     Pic[i+1] = G;           // Member of index 1
2694 //     Pic[i+2] = B;           // Member of index 2
2695 //   }
2696 // To:
2697 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2698 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2699 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2700 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2701 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2702 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2703     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2704     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2705     VPValue *BlockInMask) {
2706   Instruction *Instr = Group->getInsertPos();
2707   const DataLayout &DL = Instr->getModule()->getDataLayout();
2708 
2709   // Prepare for the vector type of the interleaved load/store.
2710   Type *ScalarTy = getLoadStoreType(Instr);
2711   unsigned InterleaveFactor = Group->getFactor();
2712   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2713   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2714 
2715   // Prepare for the new pointers.
2716   SmallVector<Value *, 2> AddrParts;
2717   unsigned Index = Group->getIndex(Instr);
2718 
2719   // TODO: extend the masked interleaved-group support to reversed access.
2720   assert((!BlockInMask || !Group->isReverse()) &&
2721          "Reversed masked interleave-group not supported.");
2722 
2723   // If the group is reverse, adjust the index to refer to the last vector lane
2724   // instead of the first. We adjust the index from the first vector lane,
2725   // rather than directly getting the pointer for lane VF - 1, because the
2726   // pointer operand of the interleaved access is supposed to be uniform. For
2727   // uniform instructions, we're only required to generate a value for the
2728   // first vector lane in each unroll iteration.
2729   if (Group->isReverse())
2730     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2731 
2732   for (unsigned Part = 0; Part < UF; Part++) {
2733     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2734     setDebugLocFromInst(AddrPart);
2735 
2736     // Note that the current instruction could be at any index in the group.
2737     // The address needs to be adjusted to the member of index 0.
2738     //
2739     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2740     //       b = A[i];       // Member of index 0
2741     // The current pointer points to A[i+1]; adjust it to A[i].
2742     //
2743     // E.g.  A[i+1] = a;     // Member of index 1
2744     //       A[i]   = b;     // Member of index 0
2745     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2746     // The current pointer points to A[i+2]; adjust it to A[i].
2747 
2748     bool InBounds = false;
2749     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2750       InBounds = gep->isInBounds();
2751     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2752     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2753 
2754     // Cast to the vector pointer type.
2755     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2756     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2757     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2758   }
2759 
2760   setDebugLocFromInst(Instr);
2761   Value *PoisonVec = PoisonValue::get(VecTy);
2762 
2763   Value *MaskForGaps = nullptr;
2764   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2765     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2766     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2767   }
2768 
2769   // Vectorize the interleaved load group.
2770   if (isa<LoadInst>(Instr)) {
2771     // For each unroll part, create a wide load for the group.
2772     SmallVector<Value *, 2> NewLoads;
2773     for (unsigned Part = 0; Part < UF; Part++) {
2774       Instruction *NewLoad;
2775       if (BlockInMask || MaskForGaps) {
2776         assert(useMaskedInterleavedAccesses(*TTI) &&
2777                "masked interleaved groups are not allowed.");
2778         Value *GroupMask = MaskForGaps;
2779         if (BlockInMask) {
2780           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2781           Value *ShuffledMask = Builder.CreateShuffleVector(
2782               BlockInMaskPart,
2783               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2784               "interleaved.mask");
2785           GroupMask = MaskForGaps
2786                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2787                                                 MaskForGaps)
2788                           : ShuffledMask;
2789         }
2790         NewLoad =
2791             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2792                                      GroupMask, PoisonVec, "wide.masked.vec");
2793       } else
2795         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2796                                             Group->getAlign(), "wide.vec");
2797       Group->addMetadata(NewLoad);
2798       NewLoads.push_back(NewLoad);
2799     }
2800 
2801     // For each member in the group, shuffle out the appropriate data from the
2802     // wide loads.
2803     unsigned J = 0;
2804     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2805       Instruction *Member = Group->getMember(I);
2806 
2807       // Skip the gaps in the group.
2808       if (!Member)
2809         continue;
2810 
2811       auto StrideMask =
2812           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2813       for (unsigned Part = 0; Part < UF; Part++) {
2814         Value *StridedVec = Builder.CreateShuffleVector(
2815             NewLoads[Part], StrideMask, "strided.vec");
2816 
2817         // If this member has a different type, cast the result to that type.
2818         if (Member->getType() != ScalarTy) {
2819           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2820           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2821           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2822         }
2823 
2824         if (Group->isReverse())
2825           StridedVec = reverseVector(StridedVec);
2826 
2827         State.set(VPDefs[J], StridedVec, Part);
2828       }
2829       ++J;
2830     }
2831     return;
2832   }
2833 
2834   // The sub-vector type for the current instruction.
2835   auto *SubVT = VectorType::get(ScalarTy, VF);
2836 
2837   // Vectorize the interleaved store group.
2838   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2839   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2840          "masked interleaved groups are not allowed.");
2841   assert((!MaskForGaps || !VF.isScalable()) &&
2842          "masking gaps for scalable vectors is not yet supported.");
2843   for (unsigned Part = 0; Part < UF; Part++) {
2844     // Collect the stored vector from each member.
2845     SmallVector<Value *, 4> StoredVecs;
2846     for (unsigned i = 0; i < InterleaveFactor; i++) {
2847       assert((Group->getMember(i) || MaskForGaps) &&
2848              "Fail to get a member from an interleaved store group");
2849       Instruction *Member = Group->getMember(i);
2850 
2851       // Skip the gaps in the group.
2852       if (!Member) {
2853         Value *Undef = PoisonValue::get(SubVT);
2854         StoredVecs.push_back(Undef);
2855         continue;
2856       }
2857 
2858       Value *StoredVec = State.get(StoredValues[i], Part);
2859 
2860       if (Group->isReverse())
2861         StoredVec = reverseVector(StoredVec);
2862 
2863       // If this member has a different type, cast it to the unified type.
2864 
2865       if (StoredVec->getType() != SubVT)
2866         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2867 
2868       StoredVecs.push_back(StoredVec);
2869     }
2870 
2871     // Concatenate all vectors into a wide vector.
2872     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2873 
2874     // Interleave the elements in the wide vector.
2875     Value *IVec = Builder.CreateShuffleVector(
2876         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2877         "interleaved.vec");
2878 
2879     Instruction *NewStoreInstr;
2880     if (BlockInMask || MaskForGaps) {
2881       Value *GroupMask = MaskForGaps;
2882       if (BlockInMask) {
2883         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2884         Value *ShuffledMask = Builder.CreateShuffleVector(
2885             BlockInMaskPart,
2886             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2887             "interleaved.mask");
2888         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2889                                                       ShuffledMask, MaskForGaps)
2890                                 : ShuffledMask;
2891       }
2892       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2893                                                 Group->getAlign(), GroupMask);
2894     } else
2895       NewStoreInstr =
2896           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2897 
2898     Group->addMetadata(NewStoreInstr);
2899   }
2900 }
2901 
2902 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2903     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2904     VPValue *StoredValue, VPValue *BlockInMask) {
2905   // Attempt to issue a wide load.
2906   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2907   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2908 
2909   assert((LI || SI) && "Invalid Load/Store instruction");
2910   assert((!SI || StoredValue) && "No stored value provided for widened store");
2911   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2912 
2913   LoopVectorizationCostModel::InstWidening Decision =
2914       Cost->getWideningDecision(Instr, VF);
2915   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2916           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2917           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2918          "CM decision is not to widen the memory instruction");
2919 
2920   Type *ScalarDataTy = getLoadStoreType(Instr);
2921 
2922   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2923   const Align Alignment = getLoadStoreAlignment(Instr);
2924 
2925   // Determine if the pointer operand of the access is either consecutive or
2926   // reverse consecutive.
2927   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2928   bool ConsecutiveStride =
2929       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2930   bool CreateGatherScatter =
2931       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2932 
2933   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2934   // gather/scatter. Otherwise Decision should have been to Scalarize.
2935   assert((ConsecutiveStride || CreateGatherScatter) &&
2936          "The instruction should be scalarized");
2937   (void)ConsecutiveStride;
2938 
2939   VectorParts BlockInMaskParts(UF);
2940   bool isMaskRequired = BlockInMask;
2941   if (isMaskRequired)
2942     for (unsigned Part = 0; Part < UF; ++Part)
2943       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2944 
2945   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2946     // Calculate the pointer for the specific unroll-part.
2947     GetElementPtrInst *PartPtr = nullptr;
2948 
2949     bool InBounds = false;
2950     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2951       InBounds = gep->isInBounds();
2952     if (Reverse) {
2953       // If the address is consecutive but reversed, then the
2954       // wide store needs to start at the last vector element.
2955       // RunTimeVF = VScale * VF.getKnownMinValue()
2956       // For fixed-width vectors VScale is 1, so RunTimeVF = VF.getKnownMinValue()
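      // For example, for a fixed VF of 4 and Part 1 this yields NumElt = -4
      // and LastLane = -3, i.e. the part pointer ends up at Ptr[-7] and the
      // wide access covers Ptr[-7..-4], which is then reversed element-wise.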
2957       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2958       // NumElt = -Part * RunTimeVF
2959       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2960       // LastLane = 1 - RunTimeVF
2961       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
2962       PartPtr =
2963           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2964       PartPtr->setIsInBounds(InBounds);
2965       PartPtr = cast<GetElementPtrInst>(
2966           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2967       PartPtr->setIsInBounds(InBounds);
2968       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2969         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2970     } else {
2971       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2972       PartPtr = cast<GetElementPtrInst>(
2973           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2974       PartPtr->setIsInBounds(InBounds);
2975     }
2976 
2977     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2978     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2979   };
2980 
2981   // Handle Stores:
2982   if (SI) {
2983     setDebugLocFromInst(SI);
2984 
2985     for (unsigned Part = 0; Part < UF; ++Part) {
2986       Instruction *NewSI = nullptr;
2987       Value *StoredVal = State.get(StoredValue, Part);
2988       if (CreateGatherScatter) {
2989         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2990         Value *VectorGep = State.get(Addr, Part);
2991         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2992                                             MaskPart);
2993       } else {
2994         if (Reverse) {
2995           // If we store to reverse consecutive memory locations, then we need
2996           // to reverse the order of elements in the stored value.
2997           StoredVal = reverseVector(StoredVal);
2998           // We don't want to update the value in the map as it might be used in
2999           // another expression. So don't call resetVectorValue(StoredVal).
3000         }
3001         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
3002         if (isMaskRequired)
3003           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
3004                                             BlockInMaskParts[Part]);
3005         else
3006           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
3007       }
3008       addMetadata(NewSI, SI);
3009     }
3010     return;
3011   }
3012 
3013   // Handle loads.
3014   assert(LI && "Must have a load instruction");
3015   setDebugLocFromInst(LI);
3016   for (unsigned Part = 0; Part < UF; ++Part) {
3017     Value *NewLI;
3018     if (CreateGatherScatter) {
3019       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
3020       Value *VectorGep = State.get(Addr, Part);
3021       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
3022                                          nullptr, "wide.masked.gather");
3023       addMetadata(NewLI, LI);
3024     } else {
3025       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
3026       if (isMaskRequired)
3027         NewLI = Builder.CreateMaskedLoad(
3028             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
3029             PoisonValue::get(DataTy), "wide.masked.load");
3030       else
3031         NewLI =
3032             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
3033 
3034       // Add metadata to the load, but set the vector value to the reversed shuffle.
3035       addMetadata(NewLI, LI);
3036       if (Reverse)
3037         NewLI = reverseVector(NewLI);
3038     }
3039 
3040     State.set(Def, NewLI, Part);
3041   }
3042 }
3043 
3044 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3045                                                VPUser &User,
3046                                                const VPIteration &Instance,
3047                                                bool IfPredicateInstr,
3048                                                VPTransformState &State) {
3049   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3050 
3051   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3052   // the first lane and part.
3053   if (isa<NoAliasScopeDeclInst>(Instr))
3054     if (!Instance.isFirstIteration())
3055       return;
3056 
3057   setDebugLocFromInst(Instr);
3058 
3059   // Does this instruction return a value?
3060   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3061 
3062   Instruction *Cloned = Instr->clone();
3063   if (!IsVoidRetTy)
3064     Cloned->setName(Instr->getName() + ".cloned");
3065 
3066   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3067                                Builder.GetInsertPoint());
3068   // Replace the operands of the cloned instructions with their scalar
3069   // equivalents in the new loop.
3070   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3071     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3072     auto InputInstance = Instance;
3073     if (!Operand || !OrigLoop->contains(Operand) ||
3074         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3075       InputInstance.Lane = VPLane::getFirstLane();
3076     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3077     Cloned->setOperand(op, NewOp);
3078   }
3079   addNewMetadata(Cloned, Instr);
3080 
3081   // Place the cloned scalar in the new loop.
3082   Builder.Insert(Cloned);
3083 
3084   State.set(Def, Cloned, Instance);
3085 
3086   // If we just cloned a new assumption, add it to the assumption cache.
3087   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3088     AC->registerAssumption(II);
3089 
3090   // End if-block.
3091   if (IfPredicateInstr)
3092     PredicatedInstructions.push_back(Cloned);
3093 }
3094 
3095 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3096                                                       Value *End, Value *Step,
3097                                                       Instruction *DL) {
3098   BasicBlock *Header = L->getHeader();
3099   BasicBlock *Latch = L->getLoopLatch();
3100   // As we're just creating this loop, it's possible no latch exists
3101   // yet. If so, use the header as this will be a single block loop.
3102   if (!Latch)
3103     Latch = Header;
3104 
3105   IRBuilder<> B(&*Header->getFirstInsertionPt());
3106   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3107   setDebugLocFromInst(OldInst, &B);
3108   auto *Induction = B.CreatePHI(Start->getType(), 2, "index");
3109 
3110   B.SetInsertPoint(Latch->getTerminator());
3111   setDebugLocFromInst(OldInst, &B);
3112 
3113   // Create i+1 and fill the PHINode.
3114   //
3115   // If the tail is not folded, we know that End - Start >= Step (either
3116   // statically or through the minimum iteration checks). We also know that both
3117   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3118   // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
3119   // overflows and we can mark the induction increment as NUW.
3120   Value *Next = B.CreateAdd(Induction, Step, "index.next",
3121                             /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3122   Induction->addIncoming(Start, L->getLoopPreheader());
3123   Induction->addIncoming(Next, Latch);
3124   // Create the compare.
3125   Value *ICmp = B.CreateICmpEQ(Next, End);
3126   B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3127 
3128   // Now we have two terminators. Remove the old one from the block.
3129   Latch->getTerminator()->eraseFromParent();
3130 
3131   return Induction;
3132 }
3133 
3134 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3135   if (TripCount)
3136     return TripCount;
3137 
3138   assert(L && "Create Trip Count for null loop.");
3139   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3140   // Find the loop boundaries.
3141   ScalarEvolution *SE = PSE.getSE();
3142   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3143   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3144          "Invalid loop count");
3145 
3146   Type *IdxTy = Legal->getWidestInductionType();
3147   assert(IdxTy && "No type for induction");
3148 
3149   // The exit count might have type i64 while the phi is i32. This can happen
3150   // if we have an induction variable that is sign extended before the
3151   // compare. The only way we can get a backedge-taken count then is if the
3152   // induction variable was signed and as such will not overflow. In such a
3153   // case truncation is legal.
3154   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3155       IdxTy->getPrimitiveSizeInBits())
3156     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3157   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3158 
3159   // Get the total trip count from the count by adding 1.
3160   const SCEV *ExitCount = SE->getAddExpr(
3161       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3162 
3163   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3164 
3165   // Expand the trip count and place the new instructions in the preheader.
3166   // Notice that the pre-header does not change, only the loop body.
3167   SCEVExpander Exp(*SE, DL, "induction");
3168 
3169   // Count holds the overall loop count (N).
3170   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3171                                 L->getLoopPreheader()->getTerminator());
3172 
3173   if (TripCount->getType()->isPointerTy())
3174     TripCount =
3175         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3176                                     L->getLoopPreheader()->getTerminator());
3177 
3178   return TripCount;
3179 }
3180 
3181 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3182   if (VectorTripCount)
3183     return VectorTripCount;
3184 
3185   Value *TC = getOrCreateTripCount(L);
3186   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3187 
3188   Type *Ty = TC->getType();
3189   // This is where we can make the step a runtime constant.
3190   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3191 
3192   // If the tail is to be folded by masking, round the number of iterations N
3193   // up to a multiple of Step instead of rounding down. This is done by first
3194   // adding Step-1 and then rounding down. Note that it's ok if this addition
3195   // overflows: the vector induction variable will eventually wrap to zero given
3196   // that it starts at zero and its Step is a power of two; the loop will then
3197   // exit, with the last early-exit vector comparison also producing all-true.
3198   if (Cost->foldTailByMasking()) {
3199     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3200            "VF*UF must be a power of 2 when folding tail by masking");
3201     assert(!VF.isScalable() &&
3202            "Tail folding not yet supported for scalable vectors");
3203     TC = Builder.CreateAdd(
3204         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3205   }
3206 
3207   // Now we need to generate the expression for the part of the loop that the
3208   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3209   // iterations are not required for correctness, or N - Step, otherwise. Step
3210   // is equal to the vectorization factor (number of SIMD elements) times the
3211   // unroll factor (number of SIMD instructions).
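  // For example, with a trip count of N = 10 and Step = VF * UF = 4, this
  // yields n.mod.vf = 2 and n.vec = 8; when folding the tail by masking, N is
  // first bumped to 13 above, giving n.mod.vf = 1 and n.vec = 12.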
3212   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3213 
3214   // There are cases where we *must* run at least one iteration in the remainder
3215   // loop.  See the cost model for when this can happen.  If the step evenly
3216   // divides the trip count, we set the remainder to be equal to the step. If
3217   // the step does not evenly divide the trip count, no adjustment is necessary
3218   // since there will already be scalar iterations. Note that the minimum
3219   // iterations check ensures that N >= Step.
3220   if (Cost->requiresScalarEpilogue(VF)) {
3221     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3222     R = Builder.CreateSelect(IsZero, Step, R);
3223   }
3224 
3225   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3226 
3227   return VectorTripCount;
3228 }
3229 
3230 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3231                                                    const DataLayout &DL) {
3232   // Verify that V is a vector type with the same number of elements as DstVTy.
3233   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3234   unsigned VF = DstFVTy->getNumElements();
3235   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3236   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3237   Type *SrcElemTy = SrcVecTy->getElementType();
3238   Type *DstElemTy = DstFVTy->getElementType();
3239   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3240          "Vector elements must have same size");
3241 
3242   // Do a direct cast if element types are castable.
3243   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3244     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3245   }
3246   // V cannot be cast directly to the desired vector type. This may happen
3247   // when V is a floating point vector but DstVTy is a vector of pointers, or
3248   // vice-versa. Handle this with a two-step cast using an intermediate
3249   // integer type for the bitcast, i.e. Ptr <-> Int <-> Float.
3250   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3251          "Only one type should be a pointer type");
3252   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3253          "Only one type should be a floating point type");
3254   Type *IntTy =
3255       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3256   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3257   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3258   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3259 }
3260 
3261 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3262                                                          BasicBlock *Bypass) {
3263   Value *Count = getOrCreateTripCount(L);
3264   // Reuse existing vector loop preheader for TC checks.
3265   // Note that new preheader block is generated for vector loop.
3266   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3267   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3268 
3269   // Generate code to check if the loop's trip count is less than VF * UF, or
3270   // equal to it in case a scalar epilogue is required; this implies that the
3271   // vector trip count is zero. This check also covers the case where adding one
3272   // to the backedge-taken count overflowed leading to an incorrect trip count
3273   // of zero. In this case we will also jump to the scalar loop.
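  // For example, with VF = 4 and UF = 2 (and the tail not folded by masking),
  // the vector loop is bypassed when the trip count is less than 8 (ULT), or
  // less than or equal to 8 (ULE) if a scalar epilogue is required to run.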
3274   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3275                                             : ICmpInst::ICMP_ULT;
3276 
3277   // If tail is to be folded, vector loop takes care of all iterations.
3278   Value *CheckMinIters = Builder.getFalse();
3279   if (!Cost->foldTailByMasking()) {
3280     Value *Step =
3281         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3282     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3283   }
3284   // Create new preheader for vector loop.
3285   LoopVectorPreHeader =
3286       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3287                  "vector.ph");
3288 
3289   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3290                                DT->getNode(Bypass)->getIDom()) &&
3291          "TC check is expected to dominate Bypass");
3292 
3293   // Update dominator for Bypass & LoopExit (if needed).
3294   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3295   if (!Cost->requiresScalarEpilogue(VF))
3296     // If there is an epilogue which must run, there's no edge from the
3297     // middle block to exit blocks and thus no need to update the immediate
3298     // dominator of the exit blocks.
3299     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3300 
3301   ReplaceInstWithInst(
3302       TCCheckBlock->getTerminator(),
3303       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3304   LoopBypassBlocks.push_back(TCCheckBlock);
3305 }
3306 
3307 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3308 
3309   BasicBlock *const SCEVCheckBlock =
3310       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3311   if (!SCEVCheckBlock)
3312     return nullptr;
3313 
3314   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3315            (OptForSizeBasedOnProfile &&
3316             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3317          "Cannot SCEV check stride or overflow when optimizing for size");
3318 
3320   // Update dominator only if this is the first RT check.
3321   if (LoopBypassBlocks.empty()) {
3322     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3323     if (!Cost->requiresScalarEpilogue(VF))
3324       // If there is an epilogue which must run, there's no edge from the
3325       // middle block to exit blocks and thus no need to update the immediate
3326       // dominator of the exit blocks.
3327       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3328   }
3329 
3330   LoopBypassBlocks.push_back(SCEVCheckBlock);
3331   AddedSafetyChecks = true;
3332   return SCEVCheckBlock;
3333 }
3334 
3335 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3336                                                       BasicBlock *Bypass) {
3337   // VPlan-native path does not do any analysis for runtime checks currently.
3338   if (EnableVPlanNativePath)
3339     return nullptr;
3340 
3341   BasicBlock *const MemCheckBlock =
3342       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3343 
3344   // Check if we generated code that checks at runtime whether arrays overlap.
3345   // We put the checks into a separate block to make the more common case of
3346   // few elements faster.
3347   if (!MemCheckBlock)
3348     return nullptr;
3349 
3350   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3351     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3352            "Cannot emit memory checks when optimizing for size, unless forced "
3353            "to vectorize.");
3354     ORE->emit([&]() {
3355       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3356                                         L->getStartLoc(), L->getHeader())
3357              << "Code-size may be reduced by not forcing "
3358                 "vectorization, or by source-code modifications "
3359                 "eliminating the need for runtime checks "
3360                 "(e.g., adding 'restrict').";
3361     });
3362   }
3363 
3364   LoopBypassBlocks.push_back(MemCheckBlock);
3365 
3366   AddedSafetyChecks = true;
3367 
3368   // We currently don't use LoopVersioning for the actual loop cloning but we
3369   // still use it to add the noalias metadata.
3370   LVer = std::make_unique<LoopVersioning>(
3371       *Legal->getLAI(),
3372       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3373       DT, PSE.getSE());
3374   LVer->prepareNoAliasMetadata();
3375   return MemCheckBlock;
3376 }
3377 
3378 Value *InnerLoopVectorizer::emitTransformedIndex(
3379     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3380     const InductionDescriptor &ID) const {
3381 
3382   SCEVExpander Exp(*SE, DL, "induction");
3383   auto Step = ID.getStep();
3384   auto StartValue = ID.getStartValue();
3385   assert(Index->getType()->getScalarType() == Step->getType() &&
3386          "Index scalar type does not match StepValue type");
3387 
3388   // Note: the IR at this point is broken. We cannot use SE to create any new
3389   // SCEV and then expand it, hoping that SCEV's simplification will give us
3390   // more optimal code. Unfortunately, attempting to do so on invalid IR may
3391   // lead to various SCEV crashes. So all we can do is use the builder and rely
3392   // on InstCombine for future simplifications. Here we handle only some
3393   // trivial cases.
3394   auto CreateAdd = [&B](Value *X, Value *Y) {
3395     assert(X->getType() == Y->getType() && "Types don't match!");
3396     if (auto *CX = dyn_cast<ConstantInt>(X))
3397       if (CX->isZero())
3398         return Y;
3399     if (auto *CY = dyn_cast<ConstantInt>(Y))
3400       if (CY->isZero())
3401         return X;
3402     return B.CreateAdd(X, Y);
3403   };
3404 
3405   // We allow X to be a vector type, in which case Y will potentially be
3406   // splatted into a vector with the same element count.
3407   auto CreateMul = [&B](Value *X, Value *Y) {
3408     assert(X->getType()->getScalarType() == Y->getType() &&
3409            "Types don't match!");
3410     if (auto *CX = dyn_cast<ConstantInt>(X))
3411       if (CX->isOne())
3412         return Y;
3413     if (auto *CY = dyn_cast<ConstantInt>(Y))
3414       if (CY->isOne())
3415         return X;
3416     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3417     if (XVTy && !isa<VectorType>(Y->getType()))
3418       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3419     return B.CreateMul(X, Y);
3420   };
3421 
3422   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3423   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3424   // the DomTree is not kept up-to-date for additional blocks generated in the
3425   // vector loop. By using the header as insertion point, we guarantee that the
3426   // expanded instructions dominate all their uses.
3427   auto GetInsertPoint = [this, &B]() {
3428     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3429     if (InsertBB != LoopVectorBody &&
3430         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3431       return LoopVectorBody->getTerminator();
3432     return &*B.GetInsertPoint();
3433   };
3434 
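  // For example, an integer induction with StartValue 5 and Step 3 maps Index
  // to 5 + Index * 3, whereas a pointer induction produces the equivalent
  // &StartValue[Index * Step] via a GEP.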
3435   switch (ID.getKind()) {
3436   case InductionDescriptor::IK_IntInduction: {
3437     assert(!isa<VectorType>(Index->getType()) &&
3438            "Vector indices not supported for integer inductions yet");
3439     assert(Index->getType() == StartValue->getType() &&
3440            "Index type does not match StartValue type");
3441     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3442       return B.CreateSub(StartValue, Index);
3443     auto *Offset = CreateMul(
3444         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3445     return CreateAdd(StartValue, Offset);
3446   }
3447   case InductionDescriptor::IK_PtrInduction: {
3448     assert(isa<SCEVConstant>(Step) &&
3449            "Expected constant step for pointer induction");
3450     return B.CreateGEP(
3451         StartValue->getType()->getPointerElementType(), StartValue,
3452         CreateMul(Index,
3453                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3454                                     GetInsertPoint())));
3455   }
3456   case InductionDescriptor::IK_FpInduction: {
3457     assert(!isa<VectorType>(Index->getType()) &&
3458            "Vector indices not supported for FP inductions yet");
3459     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3460     auto InductionBinOp = ID.getInductionBinOp();
3461     assert(InductionBinOp &&
3462            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3463             InductionBinOp->getOpcode() == Instruction::FSub) &&
3464            "Original bin op should be defined for FP induction");
3465 
3466     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3467     Value *MulExp = B.CreateFMul(StepValue, Index);
3468     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3469                          "induction");
3470   }
3471   case InductionDescriptor::IK_NoInduction:
3472     return nullptr;
3473   }
3474   llvm_unreachable("invalid enum");
3475 }
3476 
3477 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3478   LoopScalarBody = OrigLoop->getHeader();
3479   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3480   assert(LoopVectorPreHeader && "Invalid loop structure");
3481   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3482   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3483          "multiple exit loop without required epilogue?");
3484 
3485   LoopMiddleBlock =
3486       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3487                  LI, nullptr, Twine(Prefix) + "middle.block");
3488   LoopScalarPreHeader =
3489       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3490                  nullptr, Twine(Prefix) + "scalar.ph");
3491 
3492   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3493 
3494   // Set up the middle block terminator.  Two cases:
3495   // 1) If we know that we must execute the scalar epilogue, emit an
3496   //    unconditional branch.
3497   // 2) Otherwise, we must have a single unique exit block (due to how we
3498   //    implement the multiple exit case).  In this case, set up a conditional
3499   //    branch from the middle block to the loop scalar preheader, and the
3500   //    exit block.  completeLoopSkeleton will update the condition to use an
3501   //    iteration check, if required to decide whether to execute the remainder.
3502   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3503     BranchInst::Create(LoopScalarPreHeader) :
3504     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3505                        Builder.getTrue());
3506   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3507   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3508 
3509   // We intentionally don't let SplitBlock update LoopInfo since
3510   // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
3511   // LoopVectorBody is explicitly added to the correct place a few lines later.
3512   LoopVectorBody =
3513       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3514                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3515 
3516   // Update dominator for loop exit.
3517   if (!Cost->requiresScalarEpilogue(VF))
3518     // If there is an epilogue which must run, there's no edge from the
3519     // middle block to exit blocks and thus no need to update the immediate
3520     // dominator of the exit blocks.
3521     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3522 
3523   // Create and register the new vector loop.
3524   Loop *Lp = LI->AllocateLoop();
3525   Loop *ParentLoop = OrigLoop->getParentLoop();
3526 
3527   // Insert the new loop into the loop nest and register the new basic blocks
3528   // before calling any utilities such as SCEV that require valid LoopInfo.
3529   if (ParentLoop) {
3530     ParentLoop->addChildLoop(Lp);
3531   } else {
3532     LI->addTopLevelLoop(Lp);
3533   }
3534   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3535   return Lp;
3536 }
3537 
3538 void InnerLoopVectorizer::createInductionResumeValues(
3539     Loop *L, Value *VectorTripCount,
3540     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3541   assert(VectorTripCount && L && "Expected valid arguments");
3542   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3543           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3544          "Inconsistent information about additional bypass.");
3545   // We are going to resume the execution of the scalar loop.
3546   // Go over all of the induction variables that we found and fix the
3547   // PHIs that are left in the scalar version of the loop.
3548   // The starting values of PHI nodes depend on the counter of the last
3549   // iteration in the vectorized loop.
3550   // If we come from a bypass edge then we need to start from the original
3551   // start value.
3552   for (auto &InductionEntry : Legal->getInductionVars()) {
3553     PHINode *OrigPhi = InductionEntry.first;
3554     InductionDescriptor II = InductionEntry.second;
3555 
    // Create phi nodes to merge from the backedge-taken check block.
3557     PHINode *BCResumeVal =
3558         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3559                         LoopScalarPreHeader->getTerminator());
3560     // Copy original phi DL over to the new one.
3561     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3562     Value *&EndValue = IVEndValues[OrigPhi];
3563     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3564     if (OrigPhi == OldInduction) {
3565       // We know what the end value is.
3566       EndValue = VectorTripCount;
3567     } else {
3568       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3569 
3570       // Fast-math-flags propagate from the original induction instruction.
3571       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3572         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3573 
3574       Type *StepType = II.getStep()->getType();
3575       Instruction::CastOps CastOp =
3576           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3577       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3578       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3579       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3580       EndValue->setName("ind.end");
3581 
3582       // Compute the end value for the additional bypass (if applicable).
3583       if (AdditionalBypass.first) {
3584         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3585         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3586                                          StepType, true);
3587         CRD =
3588             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3589         EndValueFromAdditionalBypass =
3590             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3591         EndValueFromAdditionalBypass->setName("ind.end");
3592       }
3593     }
3594     // The new PHI merges the original incoming value, in case of a bypass,
3595     // or the value at the end of the vectorized loop.
3596     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3597 
3598     // Fix the scalar body counter (PHI node).
3599     // The old induction's phi node in the scalar body needs the truncated
3600     // value.
3601     for (BasicBlock *BB : LoopBypassBlocks)
3602       BCResumeVal->addIncoming(II.getStartValue(), BB);
3603 
3604     if (AdditionalBypass.first)
3605       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3606                                             EndValueFromAdditionalBypass);
3607 
3608     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3609   }
3610 }
3611 
3612 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3613                                                       MDNode *OrigLoopID) {
3614   assert(L && "Expected valid loop.");
3615 
3616   // The trip counts should be cached by now.
3617   Value *Count = getOrCreateTripCount(L);
3618   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3619 
3620   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3621 
3622   // Add a check in the middle block to see if we have completed
3623   // all of the iterations in the first vector loop.  Three cases:
3624   // 1) If we require a scalar epilogue, there is no conditional branch as
3625   //    we unconditionally branch to the scalar preheader.  Do nothing.
3626   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3627   //    Thus if tail is to be folded, we know we don't need to run the
3628   //    remainder and we can use the previous value for the condition (true).
3629   // 3) Otherwise, construct a runtime check.
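  //
  // For case 3, the middle block ends up looking roughly like this
  // (illustrative shorthand only):
  //
  //   middle.block:
  //     %cmp.n = icmp eq i64 %trip.count, %vector.trip.count
  //     br i1 %cmp.n, label %exit, label %scalar.ph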
3630   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3631     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3632                                         Count, VectorTripCount, "cmp.n",
3633                                         LoopMiddleBlock->getTerminator());
3634 
3635     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3636     // of the corresponding compare because they may have ended up with
3637     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g. if the compare got a line number inside the loop.
3639     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3640     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3641   }
3642 
3643   // Get ready to start creating new instructions into the vectorized body.
3644   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3645          "Inconsistent vector loop preheader");
3646   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3647 
3648   Optional<MDNode *> VectorizedLoopID =
3649       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3650                                       LLVMLoopVectorizeFollowupVectorized});
3651   if (VectorizedLoopID.hasValue()) {
3652     L->setLoopID(VectorizedLoopID.getValue());
3653 
3654     // Do not setAlreadyVectorized if loop attributes have been defined
3655     // explicitly.
3656     return LoopVectorPreHeader;
3657   }
3658 
3659   // Keep all loop hints from the original loop on the vector loop (we'll
3660   // replace the vectorizer-specific hints below).
3661   if (MDNode *LID = OrigLoop->getLoopID())
3662     L->setLoopID(LID);
3663 
3664   LoopVectorizeHints Hints(L, true, *ORE);
3665   Hints.setAlreadyVectorized();
3666 
3667 #ifdef EXPENSIVE_CHECKS
3668   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3669   LI->verify(*DT);
3670 #endif
3671 
3672   return LoopVectorPreHeader;
3673 }
3674 
3675 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3676   /*
3677    In this function we generate a new loop. The new loop will contain
3678    the vectorized instructions while the old loop will continue to run the
3679    scalar remainder.
3680 
3681        [ ] <-- loop iteration number check.
3682     /   |
3683    /    v
3684   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3685   |  /  |
3686   | /   v
3687   ||   [ ]     <-- vector pre header.
3688   |/    |
3689   |     v
3690   |    [  ] \
3691   |    [  ]_|   <-- vector loop.
3692   |     |
3693   |     v
3694   \   -[ ]   <--- middle-block.
3695    \/   |
3696    /\   v
3697    | ->[ ]     <--- new preheader.
3698    |    |
3699  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3700    |   [ ] \
3701    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3702     \   |
3703      \  v
3704       >[ ]     <-- exit block(s).
3705    ...
3706    */
3707 
3708   // Get the metadata of the original loop before it gets modified.
3709   MDNode *OrigLoopID = OrigLoop->getLoopID();
3710 
3711   // Workaround!  Compute the trip count of the original loop and cache it
3712   // before we start modifying the CFG.  This code has a systemic problem
3713   // wherein it tries to run analysis over partially constructed IR; this is
3714   // wrong, and not simply for SCEV.  The trip count of the original loop
3715   // simply happens to be prone to hitting this in practice.  In theory, we
3716   // can hit the same issue for any SCEV, or ValueTracking query done during
3717   // mutation.  See PR49900.
3718   getOrCreateTripCount(OrigLoop);
3719 
3720   // Create an empty vector loop, and prepare basic blocks for the runtime
3721   // checks.
3722   Loop *Lp = createVectorLoopSkeleton("");
3723 
3724   // Now, compare the new count to zero. If it is zero skip the vector loop and
3725   // jump to the scalar loop. This check also covers the case where the
3726   // backedge-taken count is uint##_max: adding one to it will overflow leading
3727   // to an incorrect trip count of zero. In this (rare) case we will also jump
3728   // to the scalar loop.
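  //
  // For example, with an i8 counter whose backedge-taken count is 255
  // (uint8_max), the computed trip count 255 + 1 wraps around to 0, so this
  // check also routes that case to the scalar loop.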
3729   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3730 
3731   // Generate the code to check any assumptions that we've made for SCEV
3732   // expressions.
3733   emitSCEVChecks(Lp, LoopScalarPreHeader);
3734 
3735   // Generate the code that checks in runtime if arrays overlap. We put the
3736   // checks into a separate block to make the more common case of few elements
3737   // faster.
3738   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3739 
3740   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
3742   // induction variables. In the code below we also support a case where we
3743   // don't have a single induction variable.
3744   //
3745   // We try to obtain an induction variable from the original loop as hard
3746   // as possible. However if we don't find one that:
3747   //   - is an integer
3748   //   - counts from zero, stepping by one
3749   //   - is the size of the widest induction variable type
3750   // then we create a new one.
3751   OldInduction = Legal->getPrimaryInduction();
3752   Type *IdxTy = Legal->getWidestInductionType();
3753   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3754   // The loop step is equal to the vectorization factor (num of SIMD elements)
3755   // times the unroll factor (num of SIMD instructions).
3756   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3757   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3758   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3759   Induction =
3760       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3761                               getDebugLocFromInstOrOperands(OldInduction));
3762 
3763   // Emit phis for the new starting index of the scalar loop.
3764   createInductionResumeValues(Lp, CountRoundDown);
3765 
3766   return completeLoopSkeleton(Lp, OrigLoopID);
3767 }
3768 
3769 // Fix up external users of the induction variable. At this point, we are
3770 // in LCSSA form, with all external PHIs that use the IV having one input value,
3771 // coming from the remainder loop. We need those PHIs to also have a correct
3772 // value for the IV when arriving directly from the middle block.
3773 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3774                                        const InductionDescriptor &II,
3775                                        Value *CountRoundDown, Value *EndValue,
3776                                        BasicBlock *MiddleBlock) {
3777   // There are two kinds of external IV usages - those that use the value
3778   // computed in the last iteration (the PHI) and those that use the penultimate
3779   // value (the value that feeds into the phi from the loop latch).
3780   // We allow both, but they, obviously, have different values.
3781 
3782   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3783 
3784   DenseMap<Value *, Value *> MissingVals;
3785 
3786   // An external user of the last iteration's value should see the value that
3787   // the remainder loop uses to initialize its own IV.
3788   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3789   for (User *U : PostInc->users()) {
3790     Instruction *UI = cast<Instruction>(U);
3791     if (!OrigLoop->contains(UI)) {
3792       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3793       MissingVals[UI] = EndValue;
3794     }
3795   }
3796 
  // An external user of the penultimate value needs to see EndValue - Step.
3798   // The simplest way to get this is to recompute it from the constituent SCEVs,
3799   // that is Start + (Step * (CRD - 1)).
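  // For instance, with Start = 0, Step = 2 and a vector trip count (CRD) of
  // 8, the escaped penultimate value is 0 + 2 * (8 - 1) = 14, whereas the
  // last value (EndValue) is 16.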
3800   for (User *U : OrigPhi->users()) {
3801     auto *UI = cast<Instruction>(U);
3802     if (!OrigLoop->contains(UI)) {
3803       const DataLayout &DL =
3804           OrigLoop->getHeader()->getModule()->getDataLayout();
3805       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3806 
3807       IRBuilder<> B(MiddleBlock->getTerminator());
3808 
3809       // Fast-math-flags propagate from the original induction instruction.
3810       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3811         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3812 
3813       Value *CountMinusOne = B.CreateSub(
3814           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3815       Value *CMO =
3816           !II.getStep()->getType()->isIntegerTy()
3817               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3818                              II.getStep()->getType())
3819               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3820       CMO->setName("cast.cmo");
3821       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3822       Escape->setName("ind.escape");
3823       MissingVals[UI] = Escape;
3824     }
3825   }
3826 
3827   for (auto &I : MissingVals) {
3828     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3830     // that is %IV2 = phi [...], [ %IV1, %latch ]
3831     // In this case, if IV1 has an external use, we need to avoid adding both
3832     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3833     // don't already have an incoming value for the middle block.
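    // (Separate calls to fixupIVUsers, one per induction variable, can reach
    // the same LCSSA phi in that situation, which is why the check is done on
    // the phi itself rather than on MissingVals.)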
3834     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3835       PHI->addIncoming(I.second, MiddleBlock);
3836   }
3837 }
3838 
3839 namespace {
3840 
3841 struct CSEDenseMapInfo {
3842   static bool canHandle(const Instruction *I) {
3843     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3844            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3845   }
3846 
3847   static inline Instruction *getEmptyKey() {
3848     return DenseMapInfo<Instruction *>::getEmptyKey();
3849   }
3850 
3851   static inline Instruction *getTombstoneKey() {
3852     return DenseMapInfo<Instruction *>::getTombstoneKey();
3853   }
3854 
3855   static unsigned getHashValue(const Instruction *I) {
3856     assert(canHandle(I) && "Unknown instruction!");
3857     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3858                                                            I->value_op_end()));
3859   }
3860 
3861   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3862     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3863         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3864       return LHS == RHS;
3865     return LHS->isIdenticalTo(RHS);
3866   }
3867 };
3868 
3869 } // end anonymous namespace
3870 
/// Perform CSE of induction variable instructions.
3872 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3874   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3875   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3876     Instruction *In = &*I++;
3877 
3878     if (!CSEDenseMapInfo::canHandle(In))
3879       continue;
3880 
3881     // Check if we can replace this instruction with any of the
3882     // visited instructions.
3883     if (Instruction *V = CSEMap.lookup(In)) {
3884       In->replaceAllUsesWith(V);
3885       In->eraseFromParent();
3886       continue;
3887     }
3888 
3889     CSEMap[In] = In;
3890   }
3891 }
3892 
3893 InstructionCost
3894 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3895                                               bool &NeedToScalarize) const {
3896   Function *F = CI->getCalledFunction();
3897   Type *ScalarRetTy = CI->getType();
3898   SmallVector<Type *, 4> Tys, ScalarTys;
3899   for (auto &ArgOp : CI->arg_operands())
3900     ScalarTys.push_back(ArgOp->getType());
3901 
3902   // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from them,
3904   // execute VF scalar calls, and then gather the result into the vector return
3905   // value.
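  // In shorthand, the estimate computed below is roughly
  //
  //   VF * scalar-call-cost + scalarization (extract/insert) overhead,
  //
  // which is later compared against the cost of an actual vector call, if the
  // target provides one.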
3906   InstructionCost ScalarCallCost =
3907       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3908   if (VF.isScalar())
3909     return ScalarCallCost;
3910 
3911   // Compute corresponding vector type for return value and arguments.
3912   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3913   for (Type *ScalarTy : ScalarTys)
3914     Tys.push_back(ToVectorTy(ScalarTy, VF));
3915 
3916   // Compute costs of unpacking argument values for the scalar calls and
3917   // packing the return values to a vector.
3918   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3919 
3920   InstructionCost Cost =
3921       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3922 
3923   // If we can't emit a vector call for this function, then the currently found
3924   // cost is the cost we need to return.
3925   NeedToScalarize = true;
3926   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3927   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3928 
3929   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3930     return Cost;
3931 
3932   // If the corresponding vector cost is cheaper, return its cost.
3933   InstructionCost VectorCallCost =
3934       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3935   if (VectorCallCost < Cost) {
3936     NeedToScalarize = false;
3937     Cost = VectorCallCost;
3938   }
3939   return Cost;
3940 }
3941 
3942 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3943   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3944     return Elt;
3945   return VectorType::get(Elt, VF);
3946 }
3947 
3948 InstructionCost
3949 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3950                                                    ElementCount VF) const {
3951   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3952   assert(ID && "Expected intrinsic call!");
3953   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3954   FastMathFlags FMF;
3955   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3956     FMF = FPMO->getFastMathFlags();
3957 
3958   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3959   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3960   SmallVector<Type *> ParamTys;
3961   std::transform(FTy->param_begin(), FTy->param_end(),
3962                  std::back_inserter(ParamTys),
3963                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3964 
3965   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3966                                     dyn_cast<IntrinsicInst>(CI));
3967   return TTI.getIntrinsicInstrCost(CostAttrs,
3968                                    TargetTransformInfo::TCK_RecipThroughput);
3969 }
3970 
3971 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3972   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3973   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3974   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3975 }
3976 
3977 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3978   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3979   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3980   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3981 }
3982 
3983 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3984   // For every instruction `I` in MinBWs, truncate the operands, create a
3985   // truncated version of `I` and reextend its result. InstCombine runs
3986   // later and will remove any ext/trunc pairs.
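  //
  // For example (illustrative shorthand, VF = 4), an i32 add known to need
  // only 8 bits becomes:
  //
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %r.tr = add <4 x i8> %a.tr, %b.tr
  //   %r    = zext <4 x i8> %r.tr to <4 x i32>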
3987   SmallPtrSet<Value *, 4> Erased;
3988   for (const auto &KV : Cost->getMinimalBitwidths()) {
3989     // If the value wasn't vectorized, we must maintain the original scalar
3990     // type. The absence of the value from State indicates that it
3991     // wasn't vectorized.
3992     VPValue *Def = State.Plan->getVPValue(KV.first);
3993     if (!State.hasAnyVectorValue(Def))
3994       continue;
3995     for (unsigned Part = 0; Part < UF; ++Part) {
3996       Value *I = State.get(Def, Part);
3997       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3998         continue;
3999       Type *OriginalTy = I->getType();
4000       Type *ScalarTruncatedTy =
4001           IntegerType::get(OriginalTy->getContext(), KV.second);
4002       auto *TruncatedTy = VectorType::get(
4003           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
4004       if (TruncatedTy == OriginalTy)
4005         continue;
4006 
4007       IRBuilder<> B(cast<Instruction>(I));
4008       auto ShrinkOperand = [&](Value *V) -> Value * {
4009         if (auto *ZI = dyn_cast<ZExtInst>(V))
4010           if (ZI->getSrcTy() == TruncatedTy)
4011             return ZI->getOperand(0);
4012         return B.CreateZExtOrTrunc(V, TruncatedTy);
4013       };
4014 
4015       // The actual instruction modification depends on the instruction type,
4016       // unfortunately.
4017       Value *NewI = nullptr;
4018       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4019         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
4020                              ShrinkOperand(BO->getOperand(1)));
4021 
4022         // Any wrapping introduced by shrinking this operation shouldn't be
4023         // considered undefined behavior. So, we can't unconditionally copy
4024         // arithmetic wrapping flags to NewI.
4025         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
4026       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
4027         NewI =
4028             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
4029                          ShrinkOperand(CI->getOperand(1)));
4030       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
4031         NewI = B.CreateSelect(SI->getCondition(),
4032                               ShrinkOperand(SI->getTrueValue()),
4033                               ShrinkOperand(SI->getFalseValue()));
4034       } else if (auto *CI = dyn_cast<CastInst>(I)) {
4035         switch (CI->getOpcode()) {
4036         default:
4037           llvm_unreachable("Unhandled cast!");
4038         case Instruction::Trunc:
4039           NewI = ShrinkOperand(CI->getOperand(0));
4040           break;
4041         case Instruction::SExt:
4042           NewI = B.CreateSExtOrTrunc(
4043               CI->getOperand(0),
4044               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4045           break;
4046         case Instruction::ZExt:
4047           NewI = B.CreateZExtOrTrunc(
4048               CI->getOperand(0),
4049               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4050           break;
4051         }
4052       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4053         auto Elements0 =
4054             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
4055         auto *O0 = B.CreateZExtOrTrunc(
4056             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
4057         auto Elements1 =
4058             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
4059         auto *O1 = B.CreateZExtOrTrunc(
4060             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
4061 
4062         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4063       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4064         // Don't do anything with the operands, just extend the result.
4065         continue;
4066       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4067         auto Elements =
4068             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
4069         auto *O0 = B.CreateZExtOrTrunc(
4070             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4071         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4072         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4073       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4074         auto Elements =
4075             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
4076         auto *O0 = B.CreateZExtOrTrunc(
4077             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4078         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4079       } else {
4080         // If we don't know what to do, be conservative and don't do anything.
4081         continue;
4082       }
4083 
4084       // Lastly, extend the result.
4085       NewI->takeName(cast<Instruction>(I));
4086       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4087       I->replaceAllUsesWith(Res);
4088       cast<Instruction>(I)->eraseFromParent();
4089       Erased.insert(I);
4090       State.reset(Def, Res, Part);
4091     }
4092   }
4093 
  // We'll have created a bunch of ZExts that are now dead (unused). Clean
  // them up.
4095   for (const auto &KV : Cost->getMinimalBitwidths()) {
4096     // If the value wasn't vectorized, we must maintain the original scalar
4097     // type. The absence of the value from State indicates that it
4098     // wasn't vectorized.
4099     VPValue *Def = State.Plan->getVPValue(KV.first);
4100     if (!State.hasAnyVectorValue(Def))
4101       continue;
4102     for (unsigned Part = 0; Part < UF; ++Part) {
4103       Value *I = State.get(Def, Part);
4104       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4105       if (Inst && Inst->use_empty()) {
4106         Value *NewI = Inst->getOperand(0);
4107         Inst->eraseFromParent();
4108         State.reset(Def, NewI, Part);
4109       }
4110     }
4111   }
4112 }
4113 
4114 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4115   // Insert truncates and extends for any truncated instructions as hints to
4116   // InstCombine.
4117   if (VF.isVector())
4118     truncateToMinimalBitwidths(State);
4119 
4120   // Fix widened non-induction PHIs by setting up the PHI operands.
4121   if (OrigPHIsToFix.size()) {
4122     assert(EnableVPlanNativePath &&
4123            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4124     fixNonInductionPHIs(State);
4125   }
4126 
4127   // At this point every instruction in the original loop is widened to a
4128   // vector form. Now we need to fix the recurrences in the loop. These PHI
4129   // nodes are currently empty because we did not want to introduce cycles.
4130   // This is the second stage of vectorizing recurrences.
4131   fixCrossIterationPHIs(State);
4132 
4133   // Forget the original basic block.
4134   PSE.getSE()->forgetLoop(OrigLoop);
4135 
4136   // If we inserted an edge from the middle block to the unique exit block,
4137   // update uses outside the loop (phis) to account for the newly inserted
4138   // edge.
4139   if (!Cost->requiresScalarEpilogue(VF)) {
4140     // Fix-up external users of the induction variables.
4141     for (auto &Entry : Legal->getInductionVars())
4142       fixupIVUsers(Entry.first, Entry.second,
4143                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4144                    IVEndValues[Entry.first], LoopMiddleBlock);
4145 
4146     fixLCSSAPHIs(State);
4147   }
4148 
4149   for (Instruction *PI : PredicatedInstructions)
4150     sinkScalarOperands(&*PI);
4151 
4152   // Remove redundant induction instructions.
4153   cse(LoopVectorBody);
4154 
4155   // Set/update profile weights for the vector and remainder loops as original
4156   // loop iterations are now distributed among them. Note that original loop
4157   // represented by LoopScalarBody becomes remainder loop after vectorization.
4158   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up getting a slightly roughened result but that should be OK since
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of vector code caused by legality checks is ignored, optimistically
  // assigning all the weight to the vector loop.
4164   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4168   setProfileInfoAfterUnrolling(
4169       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4170       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4171 }
4172 
4173 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4174   // In order to support recurrences we need to be able to vectorize Phi nodes.
4175   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4176   // stage #2: We now need to fix the recurrences by adding incoming edges to
4177   // the currently empty PHI nodes. At this point every instruction in the
4178   // original loop is widened to a vector form so we can use them to construct
4179   // the incoming edges.
4180   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4181   for (VPRecipeBase &R : Header->phis()) {
4182     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4183       fixReduction(ReductionPhi, State);
4184     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4185       fixFirstOrderRecurrence(FOR, State);
4186   }
4187 }
4188 
4189 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4190                                                   VPTransformState &State) {
4191   // This is the second phase of vectorizing first-order recurrences. An
4192   // overview of the transformation is described below. Suppose we have the
4193   // following loop.
4194   //
4195   //   for (int i = 0; i < n; ++i)
4196   //     b[i] = a[i] - a[i - 1];
4197   //
4198   // There is a first-order recurrence on "a". For this loop, the shorthand
4199   // scalar IR looks like:
4200   //
4201   //   scalar.ph:
4202   //     s_init = a[-1]
4203   //     br scalar.body
4204   //
4205   //   scalar.body:
4206   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4207   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4208   //     s2 = a[i]
4209   //     b[i] = s2 - s1
4210   //     br cond, scalar.body, ...
4211   //
  // In this example, s1 is a recurrence because its value depends on the
4213   // previous iteration. In the first phase of vectorization, we created a
4214   // vector phi v1 for s1. We now complete the vectorization and produce the
4215   // shorthand vector IR shown below (for VF = 4, UF = 1).
4216   //
4217   //   vector.ph:
4218   //     v_init = vector(..., ..., ..., a[-1])
4219   //     br vector.body
4220   //
4221   //   vector.body
4222   //     i = phi [0, vector.ph], [i+4, vector.body]
4223   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4224   //     v2 = a[i, i+1, i+2, i+3];
4225   //     v3 = vector(v1(3), v2(0, 1, 2))
4226   //     b[i, i+1, i+2, i+3] = v2 - v3
4227   //     br cond, vector.body, middle.block
4228   //
4229   //   middle.block:
4230   //     x = v2(3)
4231   //     br scalar.ph
4232   //
4233   //   scalar.ph:
4234   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4235   //     br scalar.body
4236   //
  // After the vector loop completes execution, we extract the next value of
4238   // the recurrence (x) to use as the initial value in the scalar loop.
4239 
4240   // Extract the last vector element in the middle block. This will be the
4241   // initial value for the recurrence when jumping to the scalar loop.
4242   VPValue *PreviousDef = PhiR->getBackedgeValue();
4243   Value *Incoming = State.get(PreviousDef, UF - 1);
4244   auto *ExtractForScalar = Incoming;
4245   auto *IdxTy = Builder.getInt32Ty();
4246   if (VF.isVector()) {
4247     auto *One = ConstantInt::get(IdxTy, 1);
4248     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4249     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4250     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4251     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4252                                                     "vector.recur.extract");
4253   }
  // Extract the second-to-last element in the middle block if the
4255   // Phi is used outside the loop. We need to extract the phi itself
4256   // and not the last element (the phi update in the current iteration). This
4257   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4258   // when the scalar loop is not run at all.
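  //
  // For example, with a fixed VF of 4 the resume value for the scalar loop is
  // taken from lane VF - 1 = 3 above, while the value feeding a phi outside
  // the loop is taken from lane VF - 2 = 2 below.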
4259   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4260   if (VF.isVector()) {
4261     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4262     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4263     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4264         Incoming, Idx, "vector.recur.extract.for.phi");
4265   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second-to-last element when VF > 1.
4270     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4271 
4272   // Fix the initial value of the original recurrence in the scalar loop.
4273   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4274   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4275   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4276   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4277   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4278     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4279     Start->addIncoming(Incoming, BB);
4280   }
4281 
4282   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4283   Phi->setName("scalar.recur");
4284 
4285   // Finally, fix users of the recurrence outside the loop. The users will need
4286   // either the last value of the scalar recurrence or the last value of the
4287   // vector recurrence we extracted in the middle block. Since the loop is in
4288   // LCSSA form, we just need to find all the phi nodes for the original scalar
4289   // recurrence in the exit block, and then add an edge for the middle block.
4290   // Note that LCSSA does not imply single entry when the original scalar loop
4291   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
4294   if (!Cost->requiresScalarEpilogue(VF))
4295     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4296       if (any_of(LCSSAPhi.incoming_values(),
4297                  [Phi](Value *V) { return V == Phi; }))
4298         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4299 }
4300 
4301 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4302                                        VPTransformState &State) {
4303   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4305   assert(Legal->isReductionVariable(OrigPhi) &&
4306          "Unable to find the reduction variable");
4307   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4308 
4309   RecurKind RK = RdxDesc.getRecurrenceKind();
4310   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4311   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4312   setDebugLocFromInst(ReductionStartValue);
4313 
4314   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4315   // This is the vector-clone of the value that leaves the loop.
4316   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4317 
4318   // Wrap flags are in general invalid after vectorization, clear them.
4319   clearReductionWrapFlags(RdxDesc, State);
4320 
4321   // Before each round, move the insertion point right between
4322   // the PHIs and the values we are going to write.
4323   // This allows us to write both PHINodes and the extractelement
4324   // instructions.
4325   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4326 
4327   setDebugLocFromInst(LoopExitInst);
4328 
4329   Type *PhiTy = OrigPhi->getType();
4330   // If tail is folded by masking, the vector value to leave the loop should be
4331   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4332   // instead of the former. For an inloop reduction the reduction will already
4333   // be predicated, and does not need to be handled here.
4334   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4335     for (unsigned Part = 0; Part < UF; ++Part) {
4336       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4337       Value *Sel = nullptr;
4338       for (User *U : VecLoopExitInst->users()) {
4339         if (isa<SelectInst>(U)) {
4340           assert(!Sel && "Reduction exit feeding two selects");
4341           Sel = U;
4342         } else
4343           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4344       }
4345       assert(Sel && "Reduction exit feeds no select");
4346       State.reset(LoopExitInstDef, Sel, Part);
4347 
4348       // If the target can create a predicated operator for the reduction at no
4349       // extra cost in the loop (for example a predicated vadd), it can be
4350       // cheaper for the select to remain in the loop than be sunk out of it,
4351       // and so use the select value for the phi instead of the old
4352       // LoopExitValue.
4353       if (PreferPredicatedReductionSelect ||
4354           TTI->preferPredicatedReductionSelect(
4355               RdxDesc.getOpcode(), PhiTy,
4356               TargetTransformInfo::ReductionFlags())) {
4357         auto *VecRdxPhi =
4358             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4359         VecRdxPhi->setIncomingValueForBlock(
4360             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4361       }
4362     }
4363   }
4364 
4365   // If the vector reduction can be performed in a smaller type, we truncate
4366   // then extend the loop exit value to enable InstCombine to evaluate the
4367   // entire expression in the smaller type.
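  //
  // For example, an i32 add reduction whose values are known to fit in i8 is
  // computed on <VF x i8> values inside the loop, and the final reduced value
  // is extended back to i32 (sext or zext, depending on RdxDesc.isSigned())
  // after the loop.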
4368   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4369     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4370     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4371     Builder.SetInsertPoint(
4372         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4373     VectorParts RdxParts(UF);
4374     for (unsigned Part = 0; Part < UF; ++Part) {
4375       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4376       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4377       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4378                                         : Builder.CreateZExt(Trunc, VecTy);
4379       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4380            UI != RdxParts[Part]->user_end();)
4381         if (*UI != Trunc) {
4382           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4383           RdxParts[Part] = Extnd;
4384         } else {
4385           ++UI;
4386         }
4387     }
4388     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4389     for (unsigned Part = 0; Part < UF; ++Part) {
4390       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4391       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4392     }
4393   }
4394 
4395   // Reduce all of the unrolled parts into a single vector.
4396   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4397   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4398 
4399   // The middle block terminator has already been assigned a DebugLoc here (the
4400   // OrigLoop's single latch terminator). We want the whole middle block to
4401   // appear to execute on this line because: (a) it is all compiler generated,
4402   // (b) these instructions are always executed after evaluating the latch
4403   // conditional branch, and (c) other passes may add new predecessors which
4404   // terminate on this line. This is the easiest way to ensure we don't
4405   // accidentally cause an extra step back into the loop while debugging.
4406   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4407   if (PhiR->isOrdered())
4408     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4409   else {
4410     // Floating-point operations should have some FMF to enable the reduction.
4411     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4412     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4413     for (unsigned Part = 1; Part < UF; ++Part) {
4414       Value *RdxPart = State.get(LoopExitInstDef, Part);
4415       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4416         ReducedPartRdx = Builder.CreateBinOp(
4417             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4418       } else {
4419         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4420       }
4421     }
4422   }
4423 
4424   // Create the reduction after the loop. Note that inloop reductions create the
4425   // target reduction in the loop using a Reduction recipe.
4426   if (VF.isVector() && !PhiR->isInLoop()) {
4427     ReducedPartRdx =
4428         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4429     // If the reduction can be performed in a smaller type, we need to extend
4430     // the reduction to the wider type before we branch to the original loop.
4431     if (PhiTy != RdxDesc.getRecurrenceType())
4432       ReducedPartRdx = RdxDesc.isSigned()
4433                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4434                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4435   }
4436 
4437   // Create a phi node that merges control-flow from the backedge-taken check
4438   // block and the middle block.
4439   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4440                                         LoopScalarPreHeader->getTerminator());
4441   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4442     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4443   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4444 
4445   // Now, we need to fix the users of the reduction variable
4446   // inside and outside of the scalar remainder loop.
4447 
4448   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4449   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4451   if (!Cost->requiresScalarEpilogue(VF))
4452     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4453       if (any_of(LCSSAPhi.incoming_values(),
4454                  [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4455         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4456 
4457   // Fix the scalar loop reduction variable with the incoming reduction sum
4458   // from the vector body and from the backedge value.
4459   int IncomingEdgeBlockIdx =
4460       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4461   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4462   // Pick the other block.
4463   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4464   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4465   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4466 }
4467 
4468 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4469                                                   VPTransformState &State) {
4470   RecurKind RK = RdxDesc.getRecurrenceKind();
4471   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4472     return;
4473 
4474   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4475   assert(LoopExitInstr && "null loop exit instruction");
4476   SmallVector<Instruction *, 8> Worklist;
4477   SmallPtrSet<Instruction *, 8> Visited;
4478   Worklist.push_back(LoopExitInstr);
4479   Visited.insert(LoopExitInstr);
4480 
4481   while (!Worklist.empty()) {
4482     Instruction *Cur = Worklist.pop_back_val();
4483     if (isa<OverflowingBinaryOperator>(Cur))
4484       for (unsigned Part = 0; Part < UF; ++Part) {
4485         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4486         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4487       }
4488 
4489     for (User *U : Cur->users()) {
4490       Instruction *UI = cast<Instruction>(U);
4491       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4492           Visited.insert(UI).second)
4493         Worklist.push_back(UI);
4494     }
4495   }
4496 }
4497 
4498 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4499   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4500     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4501       // Some phis were already hand updated by the reduction and recurrence
4502       // code above, leave them alone.
4503       continue;
4504 
4505     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4506     // Non-instruction incoming values will have only one value.
4507 
4508     VPLane Lane = VPLane::getFirstLane();
4509     if (isa<Instruction>(IncomingValue) &&
4510         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4511                                            VF))
4512       Lane = VPLane::getLastLaneForVF(VF);
4513 
4514     // Can be a loop invariant incoming value or the last scalar value to be
4515     // extracted from the vectorized loop.
4516     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4517     Value *lastIncomingValue =
4518         OrigLoop->isLoopInvariant(IncomingValue)
4519             ? IncomingValue
4520             : State.get(State.Plan->getVPValue(IncomingValue),
4521                         VPIteration(UF - 1, Lane));
4522     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4523   }
4524 }
4525 
4526 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4527   // The basic block and loop containing the predicated instruction.
4528   auto *PredBB = PredInst->getParent();
4529   auto *VectorLoop = LI->getLoopFor(PredBB);
4530 
4531   // Initialize a worklist with the operands of the predicated instruction.
4532   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4533 
4534   // Holds instructions that we need to analyze again. An instruction may be
4535   // reanalyzed if we don't yet know if we can sink it or not.
4536   SmallVector<Instruction *, 8> InstsToReanalyze;
4537 
4538   // Returns true if a given use occurs in the predicated block. Phi nodes use
4539   // their operands in their corresponding predecessor blocks.
4540   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4541     auto *I = cast<Instruction>(U.getUser());
4542     BasicBlock *BB = I->getParent();
4543     if (auto *Phi = dyn_cast<PHINode>(I))
4544       BB = Phi->getIncomingBlock(
4545           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4546     return BB == PredBB;
4547   };
4548 
4549   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one pass
4552   // through the worklist doesn't sink a single instruction.
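  //
  // For example, the address computation of a predicated, scalarized load may
  // have been left outside the predicated block; if the load is its only
  // user, the computation is moved into the block and its own operands are
  // reconsidered on the next pass.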
4553   bool Changed;
4554   do {
4555     // Add the instructions that need to be reanalyzed to the worklist, and
4556     // reset the changed indicator.
4557     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4558     InstsToReanalyze.clear();
4559     Changed = false;
4560 
4561     while (!Worklist.empty()) {
4562       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4563 
4564       // We can't sink an instruction if it is a phi node, is not in the loop,
4565       // or may have side effects.
4566       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4567           I->mayHaveSideEffects())
4568         continue;
4569 
4570       // If the instruction is already in PredBB, check if we can sink its
4571       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4572       // sinking the scalar instruction I, hence it appears in PredBB; but it
4573       // may have failed to sink I's operands (recursively), which we try
4574       // (again) here.
4575       if (I->getParent() == PredBB) {
4576         Worklist.insert(I->op_begin(), I->op_end());
4577         continue;
4578       }
4579 
4580       // It's legal to sink the instruction if all its uses occur in the
4581       // predicated block. Otherwise, there's nothing to do yet, and we may
4582       // need to reanalyze the instruction.
4583       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4584         InstsToReanalyze.push_back(I);
4585         continue;
4586       }
4587 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4590       I->moveBefore(&*PredBB->getFirstInsertionPt());
4591       Worklist.insert(I->op_begin(), I->op_end());
4592 
4593       // The sinking may have enabled other instructions to be sunk, so we will
4594       // need to iterate.
4595       Changed = true;
4596     }
4597   } while (Changed);
4598 }
4599 
4600 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4601   for (PHINode *OrigPhi : OrigPHIsToFix) {
4602     VPWidenPHIRecipe *VPPhi =
4603         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4604     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4605     // Make sure the builder has a valid insert point.
4606     Builder.SetInsertPoint(NewPhi);
4607     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4608       VPValue *Inc = VPPhi->getIncomingValue(i);
4609       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4610       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4611     }
4612   }
4613 }
4614 
4615 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4616   return Cost->useOrderedReductions(RdxDesc);
4617 }
4618 
4619 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4620                                    VPUser &Operands, unsigned UF,
4621                                    ElementCount VF, bool IsPtrLoopInvariant,
4622                                    SmallBitVector &IsIndexLoopInvariant,
4623                                    VPTransformState &State) {
4624   // Construct a vector GEP by widening the operands of the scalar GEP as
4625   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4626   // results in a vector of pointers when at least one operand of the GEP
4627   // is vector-typed. Thus, to keep the representation compact, we only use
4628   // vector-typed operands for loop-varying values.
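  //
  // For example (illustrative shorthand), a GEP with a loop-invariant base
  // pointer and one loop-varying index is widened as
  //
  //   %gep = getelementptr float, float* %base, <VF x i64> %vec.idx
  //
  // which yields a vector of pointers while keeping the invariant base
  // scalar.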
4629 
4630   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4631     // If we are vectorizing, but the GEP has only loop-invariant operands,
4632     // the GEP we build (by only using vector-typed operands for
4633     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4634     // produce a vector of pointers, we need to either arbitrarily pick an
4635     // operand to broadcast, or broadcast a clone of the original GEP.
4636     // Here, we broadcast a clone of the original.
4637     //
4638     // TODO: If at some point we decide to scalarize instructions having
4639     //       loop-invariant operands, this special case will no longer be
4640     //       required. We would add the scalarization decision to
4641     //       collectLoopScalars() and teach getVectorValue() to broadcast
4642     //       the lane-zero scalar value.
4643     auto *Clone = Builder.Insert(GEP->clone());
4644     for (unsigned Part = 0; Part < UF; ++Part) {
4645       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4646       State.set(VPDef, EntryPart, Part);
4647       addMetadata(EntryPart, GEP);
4648     }
4649   } else {
4650     // If the GEP has at least one loop-varying operand, we are sure to
4651     // produce a vector of pointers. But if we are only unrolling, we want
4652     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4653     // produce with the code below will be scalar (if VF == 1) or vector
4654     // (otherwise). Note that for the unroll-only case, we still maintain
4655     // values in the vector mapping with initVector, as we do for other
4656     // instructions.
4657     for (unsigned Part = 0; Part < UF; ++Part) {
4658       // The pointer operand of the new GEP. If it's loop-invariant, we
4659       // won't broadcast it.
4660       auto *Ptr = IsPtrLoopInvariant
4661                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4662                       : State.get(Operands.getOperand(0), Part);
4663 
4664       // Collect all the indices for the new GEP. If any index is
4665       // loop-invariant, we won't broadcast it.
4666       SmallVector<Value *, 4> Indices;
4667       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4668         VPValue *Operand = Operands.getOperand(I);
4669         if (IsIndexLoopInvariant[I - 1])
4670           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4671         else
4672           Indices.push_back(State.get(Operand, Part));
4673       }
4674 
4675       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4676       // but it should be a vector, otherwise.
4677       auto *NewGEP =
4678           GEP->isInBounds()
4679               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4680                                           Indices)
4681               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4682       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4683              "NewGEP is not a pointer vector");
4684       State.set(VPDef, NewGEP, Part);
4685       addMetadata(NewGEP, GEP);
4686     }
4687   }
4688 }
4689 
4690 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4691                                               VPWidenPHIRecipe *PhiR,
4692                                               VPTransformState &State) {
4693   PHINode *P = cast<PHINode>(PN);
4694   if (EnableVPlanNativePath) {
4695     // Currently we enter here in the VPlan-native path for non-induction
4696     // PHIs where all control flow is uniform. We simply widen these PHIs.
4697     // Create a vector phi with no operands - the vector phi operands will be
4698     // set at the end of vector code generation.
4699     Type *VecTy = (State.VF.isScalar())
4700                       ? PN->getType()
4701                       : VectorType::get(PN->getType(), State.VF);
4702     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4703     State.set(PhiR, VecPhi, 0);
4704     OrigPHIsToFix.push_back(P);
4705 
4706     return;
4707   }
4708 
4709   assert(PN->getParent() == OrigLoop->getHeader() &&
4710          "Non-header phis should have been handled elsewhere");
4711 
4712   // In order to support recurrences we need to be able to vectorize Phi nodes.
4713   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4714   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4715   // this value when we vectorize all of the instructions that use the PHI.
4716 
4717   assert(!Legal->isReductionVariable(P) &&
4718          "reductions should be handled elsewhere");
4719 
4720   setDebugLocFromInst(P);
4721 
4722   // This PHINode must be an induction variable.
4723   // Make sure that we know about it.
4724   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4725 
4726   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4727   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4728 
4729   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4730   // which can be found from the original scalar operations.
4731   switch (II.getKind()) {
4732   case InductionDescriptor::IK_NoInduction:
4733     llvm_unreachable("Unknown induction");
4734   case InductionDescriptor::IK_IntInduction:
4735   case InductionDescriptor::IK_FpInduction:
4736     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4737   case InductionDescriptor::IK_PtrInduction: {
4738     // Handle the pointer induction variable case.
4739     assert(P->getType()->isPointerTy() && "Unexpected type.");
4740 
4741     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4742       // This is the normalized GEP that starts counting at zero.
4743       Value *PtrInd =
4744           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4745       // Determine the number of scalars we need to generate for each unroll
4746       // iteration. If the instruction is uniform, we only need to generate the
4747       // first lane. Otherwise, we generate all VF values.
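      // For example, with UF == 2, VF == 4 and a non-uniform pointer, the
      // loops below emit 2 * 4 = 8 scalar "next.gep" values; if the pointer
      // is uniform, only lane 0 of each part is materialized.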
4748       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4749       unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
4750 
4751       bool NeedsVectorIndex = !IsUniform && VF.isScalable();
4752       Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr;
4753       if (NeedsVectorIndex) {
4754         Type *VecIVTy = VectorType::get(PtrInd->getType(), VF);
4755         UnitStepVec = Builder.CreateStepVector(VecIVTy);
4756         PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd);
4757       }
4758 
4759       for (unsigned Part = 0; Part < UF; ++Part) {
4760         Value *PartStart = createStepForVF(
4761             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4762 
4763         if (NeedsVectorIndex) {
4764           Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart);
4765           Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec);
4766           Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices);
4767           Value *SclrGep =
4768               emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II);
4769           SclrGep->setName("next.gep");
4770           State.set(PhiR, SclrGep, Part);
4771           // We've cached the whole vector, which means we can support the
4772           // extraction of any lane.
4773           continue;
4774         }
4775 
4776         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4777           Value *Idx = Builder.CreateAdd(
4778               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4779           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4780           Value *SclrGep =
4781               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4782           SclrGep->setName("next.gep");
4783           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4784         }
4785       }
4786       return;
4787     }
4788     assert(isa<SCEVConstant>(II.getStep()) &&
4789            "Induction step not a SCEV constant!");
4790     Type *PhiType = II.getStep()->getType();
4791 
4792     // Build a pointer phi
4793     Value *ScalarStartValue = II.getStartValue();
4794     Type *ScStValueType = ScalarStartValue->getType();
4795     PHINode *NewPointerPhi =
4796         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4797     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4798 
4799     // A pointer induction, advanced by a GEP placed in the loop latch.
4800     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4801     Instruction *InductionLoc = LoopLatch->getTerminator();
4802     const SCEV *ScalarStep = II.getStep();
4803     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4804     Value *ScalarStepValue =
4805         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4806     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4807     Value *NumUnrolledElems =
4808         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4809     Value *InductionGEP = GetElementPtrInst::Create(
4810         ScStValueType->getPointerElementType(), NewPointerPhi,
4811         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4812         InductionLoc);
4813     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4814 
4815     // Create UF actual address GEPs that use the pointer
4816     // phi as their base and a vectorized version of the step value
4817     // (<step*0, ..., step*N>) as their offset.
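    // For example, with a fixed VF of 4, UF of 2 and a unit step, part 0 uses
    // offsets <0, 1, 2, 3> and part 1 uses offsets <4, 5, 6, 7>, each scaled
    // by the step and added to the pointer phi.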
4818     for (unsigned Part = 0; Part < State.UF; ++Part) {
4819       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4820       Value *StartOffsetScalar =
4821           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4822       Value *StartOffset =
4823           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4824       // Add a vector of consecutive numbers (0 ... VF-1) to the start offset.
4825       StartOffset =
4826           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4827 
4828       Value *GEP = Builder.CreateGEP(
4829           ScStValueType->getPointerElementType(), NewPointerPhi,
4830           Builder.CreateMul(
4831               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4832               "vector.gep"));
4833       State.set(PhiR, GEP, Part);
4834     }
4835   }
4836   }
4837 }
4838 
4839 /// A helper function for checking whether an integer division-related
4840 /// instruction may divide by zero (in which case it must be predicated if
4841 /// executed conditionally in the scalar code).
4842 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4843 /// Non-zero divisors that are not compile-time constants will not be
4844 /// converted into multiplication, so we will still end up scalarizing
4845 /// the division, but can do so w/o predication.
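/// For example, 'udiv %x, %n' with a loop-varying %n may divide by zero and
/// needs predication when executed conditionally, whereas 'udiv %x, 7' can
/// safely be left unpredicated.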
4846 static bool mayDivideByZero(Instruction &I) {
4847   assert((I.getOpcode() == Instruction::UDiv ||
4848           I.getOpcode() == Instruction::SDiv ||
4849           I.getOpcode() == Instruction::URem ||
4850           I.getOpcode() == Instruction::SRem) &&
4851          "Unexpected instruction");
4852   Value *Divisor = I.getOperand(1);
4853   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4854   return !CInt || CInt->isZero();
4855 }
4856 
4857 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4858                                            VPUser &User,
4859                                            VPTransformState &State) {
4860   switch (I.getOpcode()) {
4861   case Instruction::Call:
4862   case Instruction::Br:
4863   case Instruction::PHI:
4864   case Instruction::GetElementPtr:
4865   case Instruction::Select:
4866     llvm_unreachable("This instruction is handled by a different recipe.");
4867   case Instruction::UDiv:
4868   case Instruction::SDiv:
4869   case Instruction::SRem:
4870   case Instruction::URem:
4871   case Instruction::Add:
4872   case Instruction::FAdd:
4873   case Instruction::Sub:
4874   case Instruction::FSub:
4875   case Instruction::FNeg:
4876   case Instruction::Mul:
4877   case Instruction::FMul:
4878   case Instruction::FDiv:
4879   case Instruction::FRem:
4880   case Instruction::Shl:
4881   case Instruction::LShr:
4882   case Instruction::AShr:
4883   case Instruction::And:
4884   case Instruction::Or:
4885   case Instruction::Xor: {
4886     // Just widen unops and binops.
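    // For example, with VF == 4 an 'add i32' becomes one 'add <4 x i32>' per
    // unroll part, operating on the widened operands gathered below.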
4887     setDebugLocFromInst(&I);
4888 
4889     for (unsigned Part = 0; Part < UF; ++Part) {
4890       SmallVector<Value *, 2> Ops;
4891       for (VPValue *VPOp : User.operands())
4892         Ops.push_back(State.get(VPOp, Part));
4893 
4894       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4895 
4896       if (auto *VecOp = dyn_cast<Instruction>(V))
4897         VecOp->copyIRFlags(&I);
4898 
4899       // Use this vector value for all users of the original instruction.
4900       State.set(Def, V, Part);
4901       addMetadata(V, &I);
4902     }
4903 
4904     break;
4905   }
4906   case Instruction::ICmp:
4907   case Instruction::FCmp: {
4908     // Widen compares. Generate vector compares.
4909     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4910     auto *Cmp = cast<CmpInst>(&I);
4911     setDebugLocFromInst(Cmp);
4912     for (unsigned Part = 0; Part < UF; ++Part) {
4913       Value *A = State.get(User.getOperand(0), Part);
4914       Value *B = State.get(User.getOperand(1), Part);
4915       Value *C = nullptr;
4916       if (FCmp) {
4917         // Propagate fast math flags.
4918         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4919         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4920         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4921       } else {
4922         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4923       }
4924       State.set(Def, C, Part);
4925       addMetadata(C, &I);
4926     }
4927 
4928     break;
4929   }
4930 
4931   case Instruction::ZExt:
4932   case Instruction::SExt:
4933   case Instruction::FPToUI:
4934   case Instruction::FPToSI:
4935   case Instruction::FPExt:
4936   case Instruction::PtrToInt:
4937   case Instruction::IntToPtr:
4938   case Instruction::SIToFP:
4939   case Instruction::UIToFP:
4940   case Instruction::Trunc:
4941   case Instruction::FPTrunc:
4942   case Instruction::BitCast: {
4943     auto *CI = cast<CastInst>(&I);
4944     setDebugLocFromInst(CI);
4945 
4946     /// Vectorize casts.
4947     Type *DestTy =
4948         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
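    // For example, with VF == 4 a 'zext i8 %x to i32' is widened to a
    // 'zext <4 x i8> to <4 x i32>' for each unroll part.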
4949 
4950     for (unsigned Part = 0; Part < UF; ++Part) {
4951       Value *A = State.get(User.getOperand(0), Part);
4952       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4953       State.set(Def, Cast, Part);
4954       addMetadata(Cast, &I);
4955     }
4956     break;
4957   }
4958   default:
4959     // This instruction is not vectorized by simple widening.
4960     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4961     llvm_unreachable("Unhandled instruction!");
4962   } // end of switch.
4963 }
4964 
4965 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4966                                                VPUser &ArgOperands,
4967                                                VPTransformState &State) {
4968   assert(!isa<DbgInfoIntrinsic>(I) &&
4969          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4970   setDebugLocFromInst(&I);
4971 
4972   Module *M = I.getParent()->getParent()->getParent();
4973   auto *CI = cast<CallInst>(&I);
4974 
4975   SmallVector<Type *, 4> Tys;
4976   for (Value *ArgOperand : CI->arg_operands())
4977     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4978 
4979   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4980 
4981   // The flag indicates whether we use an intrinsic or a plain call for the
4982   // vectorized version of the instruction, i.e. whether it is more
4983   // beneficial to emit an intrinsic call than a vector library call.
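  // For example, a target may provide a vector library routine for a call
  // that has no cheap vector intrinsic lowering; in that case CallCost is
  // lower than IntrinsicCost and the vector library call is emitted instead.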
4984   bool NeedToScalarize = false;
4985   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4986   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4987   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4988   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4989          "Instruction should be scalarized elsewhere.");
4990   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4991          "Either the intrinsic cost or vector call cost must be valid");
4992 
4993   for (unsigned Part = 0; Part < UF; ++Part) {
4994     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4995     SmallVector<Value *, 4> Args;
4996     for (auto &I : enumerate(ArgOperands.operands())) {
4997       // Some intrinsics have a scalar argument - don't replace it with a
4998       // vector.
4999       Value *Arg;
5000       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5001         Arg = State.get(I.value(), Part);
5002       else {
5003         Arg = State.get(I.value(), VPIteration(0, 0));
5004         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
5005           TysForDecl.push_back(Arg->getType());
5006       }
5007       Args.push_back(Arg);
5008     }
5009 
5010     Function *VectorF;
5011     if (UseVectorIntrinsic) {
5012       // Use vector version of the intrinsic.
5013       if (VF.isVector())
5014         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5015       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5016       assert(VectorF && "Can't retrieve vector intrinsic.");
5017     } else {
5018       // Use vector version of the function call.
5019       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5020 #ifndef NDEBUG
5021       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5022              "Can't create vector function.");
5023 #endif
5024       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
5025     }
5026     SmallVector<OperandBundleDef, 1> OpBundles;
5027     CI->getOperandBundlesAsDefs(OpBundles);
5028     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
5029 
5030     if (isa<FPMathOperator>(V))
5031       V->copyFastMathFlags(CI);
5032 
5033     State.set(Def, V, Part);
5034     addMetadata(V, &I);
5035   }
5036 }
5037 
5038 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5039                                                  VPUser &Operands,
5040                                                  bool InvariantCond,
5041                                                  VPTransformState &State) {
5042   setDebugLocFromInst(&I);
5043 
5044   // The condition can be loop invariant but still defined inside the
5045   // loop. This means that we can't just use the original 'cond' value.
5046   // We have to take the 'vectorized' value and pick the first lane.
5047   // Instcombine will make this a no-op.
5048   auto *InvarCond = InvariantCond
5049                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5050                         : nullptr;
5051 
5052   for (unsigned Part = 0; Part < UF; ++Part) {
5053     Value *Cond =
5054         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5055     Value *Op0 = State.get(Operands.getOperand(1), Part);
5056     Value *Op1 = State.get(Operands.getOperand(2), Part);
5057     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5058     State.set(VPDef, Sel, Part);
5059     addMetadata(Sel, &I);
5060   }
5061 }
5062 
5063 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5064   // We should not collect Scalars more than once per VF. Right now, this
5065   // function is called from collectUniformsAndScalars(), which already does
5066   // this check. Collecting Scalars for VF=1 does not make any sense.
5067   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5068          "This function should not be visited twice for the same VF");
5069 
5070   SmallSetVector<Instruction *, 8> Worklist;
5071 
5072   // These sets are used to seed the analysis with pointers used by memory
5073   // accesses that will remain scalar.
5074   SmallSetVector<Instruction *, 8> ScalarPtrs;
5075   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5076   auto *Latch = TheLoop->getLoopLatch();
5077 
5078   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5079   // The pointer operands of loads and stores will be scalar as long as the
5080   // memory access is not a gather or scatter operation. The value operand of a
5081   // store will remain scalar if the store is scalarized.
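  // For example, the pointer operand of a load that will be widened as a
  // consecutive access remains scalar, whereas a gather needs a vector of
  // pointers and is therefore not a scalar use.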
5082   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5083     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5084     assert(WideningDecision != CM_Unknown &&
5085            "Widening decision should be ready at this moment");
5086     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5087       if (Ptr == Store->getValueOperand())
5088         return WideningDecision == CM_Scalarize;
5089     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5090            "Ptr is neither a value or pointer operand");
5091     return WideningDecision != CM_GatherScatter;
5092   };
5093 
5094   // A helper that returns true if the given value is a bitcast or
5095   // getelementptr instruction contained in the loop.
5096   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5097     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5098             isa<GetElementPtrInst>(V)) &&
5099            !TheLoop->isLoopInvariant(V);
5100   };
5101 
5102   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5103     if (!isa<PHINode>(Ptr) ||
5104         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5105       return false;
5106     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5107     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5108       return false;
5109     return isScalarUse(MemAccess, Ptr);
5110   };
5111 
5112   // A helper that evaluates a memory access's use of a pointer. If the
5113   // pointer is actually the pointer induction of a loop, it is inserted
5114   // into the Worklist. If the use will be a scalar use, and the
5115   // pointer is only used by memory accesses, we place the pointer in
5116   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5117   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5118     if (isScalarPtrInduction(MemAccess, Ptr)) {
5119       Worklist.insert(cast<Instruction>(Ptr));
5120       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5121                         << "\n");
5122 
5123       Instruction *Update = cast<Instruction>(
5124           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5125       ScalarPtrs.insert(Update);
5126       return;
5127     }
5128     // We only care about bitcast and getelementptr instructions contained in
5129     // the loop.
5130     if (!isLoopVaryingBitCastOrGEP(Ptr))
5131       return;
5132 
5133     // If the pointer has already been identified as scalar (e.g., if it was
5134     // also identified as uniform), there's nothing to do.
5135     auto *I = cast<Instruction>(Ptr);
5136     if (Worklist.count(I))
5137       return;
5138 
5139     // If the use of the pointer will be a scalar use, and all users of the
5140     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5141     // place the pointer in PossibleNonScalarPtrs.
5142     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5143           return isa<LoadInst>(U) || isa<StoreInst>(U);
5144         }))
5145       ScalarPtrs.insert(I);
5146     else
5147       PossibleNonScalarPtrs.insert(I);
5148   };
5149 
5150   // We seed the scalars analysis with two classes of instructions: (1)
5151   // instructions marked uniform-after-vectorization and (2) bitcast,
5152   // getelementptr and (pointer) phi instructions used by memory accesses
5153   // requiring a scalar use.
5154   //
5155   // (1) Add to the worklist all instructions that have been identified as
5156   // uniform-after-vectorization.
5157   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5158 
5159   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5160   // memory accesses requiring a scalar use. The pointer operands of loads and
5161   // stores will be scalar as long as the memory access is not a gather or
5162   // scatter operation. The value operand of a store will remain scalar if the
5163   // store is scalarized.
5164   for (auto *BB : TheLoop->blocks())
5165     for (auto &I : *BB) {
5166       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5167         evaluatePtrUse(Load, Load->getPointerOperand());
5168       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5169         evaluatePtrUse(Store, Store->getPointerOperand());
5170         evaluatePtrUse(Store, Store->getValueOperand());
5171       }
5172     }
5173   for (auto *I : ScalarPtrs)
5174     if (!PossibleNonScalarPtrs.count(I)) {
5175       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5176       Worklist.insert(I);
5177     }
5178 
5179   // Insert the forced scalars.
5180   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5181   // induction variable when the PHI user is scalarized.
5182   auto ForcedScalar = ForcedScalars.find(VF);
5183   if (ForcedScalar != ForcedScalars.end())
5184     for (auto *I : ForcedScalar->second)
5185       Worklist.insert(I);
5186 
5187   // Expand the worklist by looking through any bitcasts and getelementptr
5188   // instructions we've already identified as scalar. This is similar to the
5189   // expansion step in collectLoopUniforms(); however, here we're only
5190   // expanding to include additional bitcasts and getelementptr instructions.
5191   unsigned Idx = 0;
5192   while (Idx != Worklist.size()) {
5193     Instruction *Dst = Worklist[Idx++];
5194     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5195       continue;
5196     auto *Src = cast<Instruction>(Dst->getOperand(0));
5197     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5198           auto *J = cast<Instruction>(U);
5199           return !TheLoop->contains(J) || Worklist.count(J) ||
5200                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5201                   isScalarUse(J, Src));
5202         })) {
5203       Worklist.insert(Src);
5204       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5205     }
5206   }
5207 
5208   // An induction variable will remain scalar if all users of the induction
5209   // variable and induction variable update remain scalar.
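  // For example, an induction used only by scalarized address computations
  // and by its own increment stays scalar, together with the increment.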
5210   for (auto &Induction : Legal->getInductionVars()) {
5211     auto *Ind = Induction.first;
5212     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5213 
5214     // If tail-folding is applied, the primary induction variable will be used
5215     // to feed a vector compare.
5216     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5217       continue;
5218 
5219     // Determine if all users of the induction variable are scalar after
5220     // vectorization.
5221     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5222       auto *I = cast<Instruction>(U);
5223       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5224     });
5225     if (!ScalarInd)
5226       continue;
5227 
5228     // Determine if all users of the induction variable update instruction are
5229     // scalar after vectorization.
5230     auto ScalarIndUpdate =
5231         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5232           auto *I = cast<Instruction>(U);
5233           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5234         });
5235     if (!ScalarIndUpdate)
5236       continue;
5237 
5238     // The induction variable and its update instruction will remain scalar.
5239     Worklist.insert(Ind);
5240     Worklist.insert(IndUpdate);
5241     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5242     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5243                       << "\n");
5244   }
5245 
5246   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5247 }
5248 
5249 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5250   if (!blockNeedsPredication(I->getParent()))
5251     return false;
5252   switch(I->getOpcode()) {
5253   default:
5254     break;
5255   case Instruction::Load:
5256   case Instruction::Store: {
5257     if (!Legal->isMaskRequired(I))
5258       return false;
5259     auto *Ptr = getLoadStorePointerOperand(I);
5260     auto *Ty = getLoadStoreType(I);
5261     const Align Alignment = getLoadStoreAlignment(I);
5262     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5263                                 TTI.isLegalMaskedGather(Ty, Alignment))
5264                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5265                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5266   }
5267   case Instruction::UDiv:
5268   case Instruction::SDiv:
5269   case Instruction::SRem:
5270   case Instruction::URem:
5271     return mayDivideByZero(*I);
5272   }
5273   return false;
5274 }
5275 
5276 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5277     Instruction *I, ElementCount VF) {
5278   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5279   assert(getWideningDecision(I, VF) == CM_Unknown &&
5280          "Decision should not be set yet.");
5281   auto *Group = getInterleavedAccessGroup(I);
5282   assert(Group && "Must have a group.");
5283 
5284   // If the instruction's allocated size doesn't equal its type size, it
5285   // requires padding and will be scalarized.
5286   auto &DL = I->getModule()->getDataLayout();
5287   auto *ScalarTy = getLoadStoreType(I);
5288   if (hasIrregularType(ScalarTy, DL))
5289     return false;
5290 
5291   // Check if masking is required.
5292   // A Group may need masking for one of two reasons: it resides in a block that
5293   // needs predication, or it was decided to use masking to deal with gaps
5294   // (either a gap at the end of a load-access that may result in a speculative
5295   // load, or any gaps in a store-access).
5296   bool PredicatedAccessRequiresMasking =
5297       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5298   bool LoadAccessWithGapsRequiresEpilogMasking =
5299       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
5300       !isScalarEpilogueAllowed();
5301   bool StoreAccessWithGapsRequiresMasking =
5302       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
5303   if (!PredicatedAccessRequiresMasking &&
5304       !LoadAccessWithGapsRequiresEpilogMasking &&
5305       !StoreAccessWithGapsRequiresMasking)
5306     return true;
5307 
5308   // If masked interleaving is required, we expect that the user/target had
5309   // enabled it, because otherwise it either wouldn't have been created or
5310   // it should have been invalidated by the CostModel.
5311   assert(useMaskedInterleavedAccesses(TTI) &&
5312          "Masked interleave-groups for predicated accesses are not enabled.");
5313 
5314   auto *Ty = getLoadStoreType(I);
5315   const Align Alignment = getLoadStoreAlignment(I);
5316   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5317                           : TTI.isLegalMaskedStore(Ty, Alignment);
5318 }
5319 
5320 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5321     Instruction *I, ElementCount VF) {
5322   // Get and ensure we have a valid memory instruction.
5323   LoadInst *LI = dyn_cast<LoadInst>(I);
5324   StoreInst *SI = dyn_cast<StoreInst>(I);
5325   assert((LI || SI) && "Invalid memory instruction");
5326 
5327   auto *Ptr = getLoadStorePointerOperand(I);
5328 
5329   // In order to be widened, the pointer should be consecutive, first of all.
5330   if (!Legal->isConsecutivePtr(Ptr))
5331     return false;
5332 
5333   // If the instruction is a store located in a predicated block, it will be
5334   // scalarized.
5335   if (isScalarWithPredication(I))
5336     return false;
5337 
5338   // If the instruction's allocated size doesn't equal its type size, it
5339   // requires padding and will be scalarized.
5340   auto &DL = I->getModule()->getDataLayout();
5341   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5342   if (hasIrregularType(ScalarTy, DL))
5343     return false;
5344 
5345   return true;
5346 }
5347 
5348 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5349   // We should not collect Uniforms more than once per VF. Right now,
5350   // this function is called from collectUniformsAndScalars(), which
5351   // already does this check. Collecting Uniforms for VF=1 does not make any
5352   // sense.
5353 
5354   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5355          "This function should not be visited twice for the same VF");
5356 
5357   // Initialize the entry for this VF. Even if no uniform values are found,
5358   // Uniforms.count(VF) will return 1 and we will not analyze this VF again.
5359   Uniforms[VF].clear();
5360 
5361   // We now know that the loop is vectorizable!
5362   // Collect instructions inside the loop that will remain uniform after
5363   // vectorization.
5364 
5365   // Global values, params and instructions outside of current loop are out of
5366   // scope.
5367   auto isOutOfScope = [&](Value *V) -> bool {
5368     Instruction *I = dyn_cast<Instruction>(V);
5369     return (!I || !TheLoop->contains(I));
5370   };
5371 
5372   SetVector<Instruction *> Worklist;
5373   BasicBlock *Latch = TheLoop->getLoopLatch();
5374 
5375   // Instructions that are scalar with predication must not be considered
5376   // uniform after vectorization, because that would create an erroneous
5377   // replicating region where only a single instance out of VF should be formed.
5378   // TODO: optimize such seldom cases if found important, see PR40816.
5379   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5380     if (isOutOfScope(I)) {
5381       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5382                         << *I << "\n");
5383       return;
5384     }
5385     if (isScalarWithPredication(I)) {
5386       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5387                         << *I << "\n");
5388       return;
5389     }
5390     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5391     Worklist.insert(I);
5392   };
5393 
5394   // Start with the conditional branch. If the branch condition is an
5395   // instruction contained in the loop that is only used by the branch, it is
5396   // uniform.
5397   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5398   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5399     addToWorklistIfAllowed(Cmp);
5400 
5401   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5402     InstWidening WideningDecision = getWideningDecision(I, VF);
5403     assert(WideningDecision != CM_Unknown &&
5404            "Widening decision should be ready at this moment");
5405 
5406     // A uniform memory op is itself uniform.  We exclude uniform stores
5407     // here as they demand the last lane, not the first one.
5408     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5409       assert(WideningDecision == CM_Scalarize);
5410       return true;
5411     }
5412 
5413     return (WideningDecision == CM_Widen ||
5414             WideningDecision == CM_Widen_Reverse ||
5415             WideningDecision == CM_Interleave);
5416   };
5417 
5418 
5419   // Returns true if Ptr is the pointer operand of a memory access instruction
5420   // I, and I is known to not require scalarization.
5421   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5422     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5423   };
5424 
5425   // Holds a list of values which are known to have at least one uniform use.
5426   // Note that there may be other uses which aren't uniform.  A "uniform use"
5427   // here is something which only demands lane 0 of the unrolled iterations;
5428   // it does not imply that all lanes produce the same value (e.g. this is not
5429   // the usual meaning of uniform)
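  // For example, a consecutive widened load only needs the lane-0 value of
  // its pointer operand; the remaining addresses are implied by the access
  // being consecutive.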
5430   SetVector<Value *> HasUniformUse;
5431 
5432   // Scan the loop for instructions which are either a) known to have only
5433   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5434   for (auto *BB : TheLoop->blocks())
5435     for (auto &I : *BB) {
5436       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5437         switch (II->getIntrinsicID()) {
5438         case Intrinsic::sideeffect:
5439         case Intrinsic::experimental_noalias_scope_decl:
5440         case Intrinsic::assume:
5441         case Intrinsic::lifetime_start:
5442         case Intrinsic::lifetime_end:
5443           if (TheLoop->hasLoopInvariantOperands(&I))
5444             addToWorklistIfAllowed(&I);
5445           break;
5446         default:
5447           break;
5448         }
5449       }
5450 
5451       // ExtractValue instructions must be uniform, because the operands are
5452       // known to be loop-invariant.
5453       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5454         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5455                "Expected aggregate value to be loop invariant");
5456         addToWorklistIfAllowed(EVI);
5457         continue;
5458       }
5459 
5460       // If there's no pointer operand, there's nothing to do.
5461       auto *Ptr = getLoadStorePointerOperand(&I);
5462       if (!Ptr)
5463         continue;
5464 
5465       // A uniform memory op is itself uniform.  We exclude uniform stores
5466       // here as they demand the last lane, not the first one.
5467       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5468         addToWorklistIfAllowed(&I);
5469 
5470       if (isUniformDecision(&I, VF)) {
5471         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5472         HasUniformUse.insert(Ptr);
5473       }
5474     }
5475 
5476   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5477   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5478   // disallows uses outside the loop as well.
5479   for (auto *V : HasUniformUse) {
5480     if (isOutOfScope(V))
5481       continue;
5482     auto *I = cast<Instruction>(V);
5483     auto UsersAreMemAccesses =
5484       llvm::all_of(I->users(), [&](User *U) -> bool {
5485         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5486       });
5487     if (UsersAreMemAccesses)
5488       addToWorklistIfAllowed(I);
5489   }
5490 
5491   // Expand Worklist in topological order: whenever a new instruction
5492   // is added, its users should already be inside the Worklist. This ensures
5493   // that a uniform instruction is only used by other uniform instructions.
5494   unsigned idx = 0;
5495   while (idx != Worklist.size()) {
5496     Instruction *I = Worklist[idx++];
5497 
5498     for (auto OV : I->operand_values()) {
5499       // isOutOfScope operands cannot be uniform instructions.
5500       if (isOutOfScope(OV))
5501         continue;
5502       // First-order recurrence phis should typically be considered
5503       // non-uniform.
5504       auto *OP = dyn_cast<PHINode>(OV);
5505       if (OP && Legal->isFirstOrderRecurrence(OP))
5506         continue;
5507       // If all the users of the operand are uniform, then add the
5508       // operand into the uniform worklist.
5509       auto *OI = cast<Instruction>(OV);
5510       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5511             auto *J = cast<Instruction>(U);
5512             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5513           }))
5514         addToWorklistIfAllowed(OI);
5515     }
5516   }
5517 
5518   // For an instruction to be added into Worklist above, all its users inside
5519   // the loop should also be in Worklist. However, this condition cannot be
5520   // true for phi nodes that form a cyclic dependence. We must process phi
5521   // nodes separately. An induction variable will remain uniform if all users
5522   // of the induction variable and induction variable update remain uniform.
5523   // The code below handles both pointer and non-pointer induction variables.
5524   for (auto &Induction : Legal->getInductionVars()) {
5525     auto *Ind = Induction.first;
5526     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5527 
5528     // Determine if all users of the induction variable are uniform after
5529     // vectorization.
5530     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5531       auto *I = cast<Instruction>(U);
5532       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5533              isVectorizedMemAccessUse(I, Ind);
5534     });
5535     if (!UniformInd)
5536       continue;
5537 
5538     // Determine if all users of the induction variable update instruction are
5539     // uniform after vectorization.
5540     auto UniformIndUpdate =
5541         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5542           auto *I = cast<Instruction>(U);
5543           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5544                  isVectorizedMemAccessUse(I, IndUpdate);
5545         });
5546     if (!UniformIndUpdate)
5547       continue;
5548 
5549     // The induction variable and its update instruction will remain uniform.
5550     addToWorklistIfAllowed(Ind);
5551     addToWorklistIfAllowed(IndUpdate);
5552   }
5553 
5554   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5555 }
5556 
5557 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5558   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5559 
5560   if (Legal->getRuntimePointerChecking()->Need) {
5561     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5562         "runtime pointer checks needed. Enable vectorization of this "
5563         "loop with '#pragma clang loop vectorize(enable)' when "
5564         "compiling with -Os/-Oz",
5565         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5566     return true;
5567   }
5568 
5569   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5570     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5571         "runtime SCEV checks needed. Enable vectorization of this "
5572         "loop with '#pragma clang loop vectorize(enable)' when "
5573         "compiling with -Os/-Oz",
5574         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5575     return true;
5576   }
5577 
5578   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5579   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5580     reportVectorizationFailure("Runtime stride check for small trip count",
5581         "runtime stride == 1 checks needed. Enable vectorization of "
5582         "this loop without such check by compiling with -Os/-Oz",
5583         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5584     return true;
5585   }
5586 
5587   return false;
5588 }
5589 
5590 ElementCount
5591 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5592   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5593     return ElementCount::getScalable(0);
5594 
5595   if (Hints->isScalableVectorizationDisabled()) {
5596     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5597                             "ScalableVectorizationDisabled", ORE, TheLoop);
5598     return ElementCount::getScalable(0);
5599   }
5600 
5601   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5602 
5603   auto MaxScalableVF = ElementCount::getScalable(
5604       std::numeric_limits<ElementCount::ScalarTy>::max());
5605 
5606   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5607   // FIXME: While for scalable vectors this is currently sufficient, this should
5608   // be replaced by a more detailed mechanism that filters out specific VFs,
5609   // instead of invalidating vectorization for a whole set of VFs based on the
5610   // MaxVF.
5611 
5612   // Disable scalable vectorization if the loop contains unsupported reductions.
5613   if (!canVectorizeReductions(MaxScalableVF)) {
5614     reportVectorizationInfo(
5615         "Scalable vectorization not supported for the reduction "
5616         "operations found in this loop.",
5617         "ScalableVFUnfeasible", ORE, TheLoop);
5618     return ElementCount::getScalable(0);
5619   }
5620 
5621   // Disable scalable vectorization if the loop contains any instructions
5622   // with element types not supported for scalable vectors.
5623   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5624         return !Ty->isVoidTy() &&
5625                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5626       })) {
5627     reportVectorizationInfo("Scalable vectorization is not supported "
5628                             "for all element types found in this loop.",
5629                             "ScalableVFUnfeasible", ORE, TheLoop);
5630     return ElementCount::getScalable(0);
5631   }
5632 
5633   if (Legal->isSafeForAnyVectorWidth())
5634     return MaxScalableVF;
5635 
5636   // Limit MaxScalableVF by the maximum safe dependence distance.
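  // For example, with MaxSafeElements == 8 and a maximum vscale of 2, the
  // largest safe scalable VF is vscale x 4 (at most 8 elements at runtime).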
5637   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5638   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5639     unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange)
5640                              .getVScaleRangeArgs()
5641                              .second;
5642     if (VScaleMax > 0)
5643       MaxVScale = VScaleMax;
5644   }
5645   MaxScalableVF = ElementCount::getScalable(
5646       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5647   if (!MaxScalableVF)
5648     reportVectorizationInfo(
5649         "Max legal vector width too small, scalable vectorization "
5650         "unfeasible.",
5651         "ScalableVFUnfeasible", ORE, TheLoop);
5652 
5653   return MaxScalableVF;
5654 }
5655 
5656 FixedScalableVFPair
5657 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5658                                                  ElementCount UserVF) {
5659   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5660   unsigned SmallestType, WidestType;
5661   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5662 
5663   // Get the maximum safe dependence distance in bits computed by LAA.
5664   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5665   // the memory accesses that is most restrictive (involved in the smallest
5666   // dependence distance).
5667   unsigned MaxSafeElements =
5668       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
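  // For example, a max safe vector width of 256 bits with a widest type of
  // 32 bits yields MaxSafeElements == PowerOf2Floor(256 / 32) == 8.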
5669 
5670   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5671   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5672 
5673   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5674                     << ".\n");
5675   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5676                     << ".\n");
5677 
5678   // First analyze the UserVF, fall back if the UserVF should be ignored.
5679   if (UserVF) {
5680     auto MaxSafeUserVF =
5681         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5682 
5683     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5684       // If `VF=vscale x N` is safe, then so is `VF=N`
5685       if (UserVF.isScalable())
5686         return FixedScalableVFPair(
5687             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5688       else
5689         return UserVF;
5690     }
5691 
5692     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5693 
5694     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5695     // is better to ignore the hint and let the compiler choose a suitable VF.
5696     if (!UserVF.isScalable()) {
5697       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5698                         << " is unsafe, clamping to max safe VF="
5699                         << MaxSafeFixedVF << ".\n");
5700       ORE->emit([&]() {
5701         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5702                                           TheLoop->getStartLoc(),
5703                                           TheLoop->getHeader())
5704                << "User-specified vectorization factor "
5705                << ore::NV("UserVectorizationFactor", UserVF)
5706                << " is unsafe, clamping to maximum safe vectorization factor "
5707                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5708       });
5709       return MaxSafeFixedVF;
5710     }
5711 
5712     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5713       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5714                         << " is ignored because scalable vectors are not "
5715                            "available.\n");
5716       ORE->emit([&]() {
5717         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5718                                           TheLoop->getStartLoc(),
5719                                           TheLoop->getHeader())
5720                << "User-specified vectorization factor "
5721                << ore::NV("UserVectorizationFactor", UserVF)
5722                << " is ignored because the target does not support scalable "
5723                   "vectors. The compiler will pick a more suitable value.";
5724       });
5725     } else {
5726       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5727                         << " is unsafe. Ignoring scalable UserVF.\n");
5728       ORE->emit([&]() {
5729         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5730                                           TheLoop->getStartLoc(),
5731                                           TheLoop->getHeader())
5732                << "User-specified vectorization factor "
5733                << ore::NV("UserVectorizationFactor", UserVF)
5734                << " is unsafe. Ignoring the hint to let the compiler pick a "
5735                   "more suitable value.";
5736       });
5737     }
5738   }
5739 
5740   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5741                     << " / " << WidestType << " bits.\n");
5742 
5743   FixedScalableVFPair Result(ElementCount::getFixed(1),
5744                              ElementCount::getScalable(0));
5745   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5746                                            WidestType, MaxSafeFixedVF))
5747     Result.FixedVF = MaxVF;
5748 
5749   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5750                                            WidestType, MaxSafeScalableVF))
5751     if (MaxVF.isScalable()) {
5752       Result.ScalableVF = MaxVF;
5753       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5754                         << "\n");
5755     }
5756 
5757   return Result;
5758 }
5759 
5760 FixedScalableVFPair
5761 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5762   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5763     // TODO: It may be useful to do this, since the check is still likely to
5764     // be dynamically uniform if the target can skip it.
5765     reportVectorizationFailure(
5766         "Not inserting runtime ptr check for divergent target",
5767         "runtime pointer checks needed. Not enabled for divergent target",
5768         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5769     return FixedScalableVFPair::getNone();
5770   }
5771 
5772   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5773   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5774   if (TC == 1) {
5775     reportVectorizationFailure("Single iteration (non) loop",
5776         "loop trip count is one, irrelevant for vectorization",
5777         "SingleIterationLoop", ORE, TheLoop);
5778     return FixedScalableVFPair::getNone();
5779   }
5780 
5781   switch (ScalarEpilogueStatus) {
5782   case CM_ScalarEpilogueAllowed:
5783     return computeFeasibleMaxVF(TC, UserVF);
5784   case CM_ScalarEpilogueNotAllowedUsePredicate:
5785     LLVM_FALLTHROUGH;
5786   case CM_ScalarEpilogueNotNeededUsePredicate:
5787     LLVM_DEBUG(
5788         dbgs() << "LV: vector predicate hint/switch found.\n"
5789                << "LV: Not allowing scalar epilogue, creating predicated "
5790                << "vector loop.\n");
5791     break;
5792   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5793     // fallthrough as a special case of OptForSize
5794   case CM_ScalarEpilogueNotAllowedOptSize:
5795     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5796       LLVM_DEBUG(
5797           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5798     else
5799       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5800                         << "count.\n");
5801 
5802     // Bail if runtime checks are required, which are not good when optimising
5803     // for size.
5804     if (runtimeChecksRequired())
5805       return FixedScalableVFPair::getNone();
5806 
5807     break;
5808   }
5809 
5810   // The only loops we can vectorize without a scalar epilogue, are loops with
5811   // a bottom-test and a single exiting block. We'd have to handle the fact
5812   // that not every instruction executes on the last iteration.  This will
5813   // require a lane mask which varies through the vector loop body.  (TODO)
5814   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5815     // If there was a tail-folding hint/switch, but we can't fold the tail by
5816     // masking, fallback to a vectorization with a scalar epilogue.
5817     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5818       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5819                            "scalar epilogue instead.\n");
5820       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5821       return computeFeasibleMaxVF(TC, UserVF);
5822     }
5823     return FixedScalableVFPair::getNone();
5824   }
5825 
5826   // Now try folding the tail by masking.
5827 
5828   // Invalidate interleave groups that require an epilogue if we can't mask
5829   // the interleave-group.
5830   if (!useMaskedInterleavedAccesses(TTI)) {
5831     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5832            "No decisions should have been taken at this point");
5833     // Note: There is no need to invalidate any cost modeling decisions here, as
5834     // none were taken so far.
5835     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5836   }
5837 
5838   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5839   // Avoid tail folding if the trip count is known to be a multiple of any VF
5840   // we chose.
5841   // FIXME: The condition below pessimises the case for fixed-width vectors,
5842   // when scalable VFs are also candidates for vectorization.
5843   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5844     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5845     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5846            "MaxFixedVF must be a power of 2");
5847     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5848                                    : MaxFixedVF.getFixedValue();
5849     ScalarEvolution *SE = PSE.getSE();
5850     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5851     const SCEV *ExitCount = SE->getAddExpr(
5852         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5853     const SCEV *Rem = SE->getURemExpr(
5854         SE->applyLoopGuards(ExitCount, TheLoop),
5855         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5856     if (Rem->isZero()) {
5857       // Accept MaxFixedVF if we do not have a tail.
5858       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5859       return MaxFactors;
5860     }
5861   }
5862 
5863   // For scalable vectors, don't use tail folding as this is currently not yet
5864   // supported. The code is likely to have ended up here if the trip count is
5865   // low, in which case it makes sense not to use scalable vectors.
5866   if (MaxFactors.ScalableVF.isVector())
5867     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5868 
5869   // If we don't know the precise trip count, or if the trip count that we
5870   // found modulo the vectorization factor is not zero, try to fold the tail
5871   // by masking.
5872   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5873   if (Legal->prepareToFoldTailByMasking()) {
5874     FoldTailByMasking = true;
5875     return MaxFactors;
5876   }
5877 
5878   // If there was a tail-folding hint/switch, but we can't fold the tail by
5879   // masking, fallback to a vectorization with a scalar epilogue.
5880   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5881     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5882                          "scalar epilogue instead.\n");
5883     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5884     return MaxFactors;
5885   }
5886 
5887   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5888     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5889     return FixedScalableVFPair::getNone();
5890   }
5891 
5892   if (TC == 0) {
5893     reportVectorizationFailure(
5894         "Unable to calculate the loop count due to complex control flow",
5895         "unable to calculate the loop count due to complex control flow",
5896         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5897     return FixedScalableVFPair::getNone();
5898   }
5899 
5900   reportVectorizationFailure(
5901       "Cannot optimize for size and vectorize at the same time.",
5902       "cannot optimize for size and vectorize at the same time. "
5903       "Enable vectorization of this loop with '#pragma clang loop "
5904       "vectorize(enable)' when compiling with -Os/-Oz",
5905       "NoTailLoopWithOptForSize", ORE, TheLoop);
5906   return FixedScalableVFPair::getNone();
5907 }
5908 
5909 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5910     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5911     const ElementCount &MaxSafeVF) {
5912   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5913   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5914       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5915                            : TargetTransformInfo::RGK_FixedWidthVector);
5916 
5917   // Convenience function to return the minimum of two ElementCounts.
5918   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5919     assert((LHS.isScalable() == RHS.isScalable()) &&
5920            "Scalable flags must match");
5921     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5922   };
5923 
5924   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5925   // Note that both WidestRegister and WidestType may not be powers of 2.
5926   auto MaxVectorElementCount = ElementCount::get(
5927       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5928       ComputeScalableMaxVF);
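  // For example, a 128-bit vector register and a widest type of 32 bits give
  // PowerOf2Floor(128 / 32) == 4 lanes (vscale x 4 in the scalable case).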
5929   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5930   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5931                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5932 
5933   if (!MaxVectorElementCount) {
5934     LLVM_DEBUG(dbgs() << "LV: The target has no "
5935                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5936                       << " vector registers.\n");
5937     return ElementCount::getFixed(1);
5938   }
5939 
5940   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5941   if (ConstTripCount &&
5942       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5943       isPowerOf2_32(ConstTripCount)) {
5944     // We need to clamp the VF to be the ConstTripCount. There is no point in
5945     // choosing a higher viable VF as done in the loop below. If
5946     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5947     // the TC is less than or equal to the known number of lanes.
5948     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5949                       << ConstTripCount << "\n");
5950     return TripCountEC;
5951   }
5952 
5953   ElementCount MaxVF = MaxVectorElementCount;
5954   if (TTI.shouldMaximizeVectorBandwidth() ||
5955       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5956     auto MaxVectorElementCountMaxBW = ElementCount::get(
5957         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5958         ComputeScalableMaxVF);
5959     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5960 
5961     // Collect all viable vectorization factors larger than the default MaxVF
5962     // (i.e. MaxVectorElementCount).
5963     SmallVector<ElementCount, 8> VFs;
5964     for (ElementCount VS = MaxVectorElementCount * 2;
5965          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5966       VFs.push_back(VS);
5967 
5968     // For each VF calculate its register usage.
5969     auto RUs = calculateRegisterUsage(VFs);
5970 
5971     // Select the largest VF which doesn't require more registers than existing
5972     // ones.
5973     for (int i = RUs.size() - 1; i >= 0; --i) {
5974       bool Selected = true;
5975       for (auto &pair : RUs[i].MaxLocalUsers) {
5976         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5977         if (pair.second > TargetNumRegisters)
5978           Selected = false;
5979       }
5980       if (Selected) {
5981         MaxVF = VFs[i];
5982         break;
5983       }
5984     }
5985     if (ElementCount MinVF =
5986             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5987       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5988         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5989                           << ") with target's minimum: " << MinVF << '\n');
5990         MaxVF = MinVF;
5991       }
5992     }
5993   }
5994   return MaxVF;
5995 }
5996 
5997 bool LoopVectorizationCostModel::isMoreProfitable(
5998     const VectorizationFactor &A, const VectorizationFactor &B) const {
5999   InstructionCost CostA = A.Cost;
6000   InstructionCost CostB = B.Cost;
6001 
6002   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
6003 
6004   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
6005       MaxTripCount) {
6006     // If we are folding the tail and the trip count is a known (possibly small)
6007     // constant, the trip count will be rounded up to an integer number of
6008     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
6009     // which we compare directly. When not folding the tail, the total cost will
6010     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the trip
    // count as here.
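    // For example, with MaxTripCount = 10, a candidate with VF = 4 and cost 8
    // yields 8 * ceil(10 / 4) = 24, while one with VF = 2 and cost 5 yields
    // 5 * ceil(10 / 2) = 25, so the VF = 4 candidate wins despite its higher
    // per-iteration cost.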
6013     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
6014     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
6015     return RTCostA < RTCostB;
6016   }
6017 
6018   // When set to preferred, for now assume vscale may be larger than 1, so
6019   // that scalable vectorization is slightly favorable over fixed-width
6020   // vectorization.
6021   if (Hints->isScalableVectorizationPreferred())
6022     if (A.Width.isScalable() && !B.Width.isScalable())
6023       return (CostA * B.Width.getKnownMinValue()) <=
6024              (CostB * A.Width.getKnownMinValue());
6025 
6026   // To avoid the need for FP division:
6027   //      (CostA / A.Width) < (CostB / B.Width)
6028   // <=>  (CostA * B.Width) < (CostB * A.Width)
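  // For example, a candidate with cost 10 at VF = 8 beats one with cost 6 at
  // VF = 4, since 10 * 4 = 40 < 6 * 8 = 48.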
6029   return (CostA * B.Width.getKnownMinValue()) <
6030          (CostB * A.Width.getKnownMinValue());
6031 }
6032 
6033 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
6034     const ElementCountSet &VFCandidates) {
6035   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6036   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6037   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6038   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
6039          "Expected Scalar VF to be a candidate");
6040 
6041   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6042   VectorizationFactor ChosenFactor = ScalarCost;
6043 
6044   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6045   if (ForceVectorization && VFCandidates.size() > 1) {
6046     // Ignore scalar width, because the user explicitly wants vectorization.
6047     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6048     // evaluation.
6049     ChosenFactor.Cost = InstructionCost::getMax();
6050   }
6051 
6052   SmallVector<InstructionVFPair> InvalidCosts;
6053   for (const auto &i : VFCandidates) {
6054     // The cost for scalar VF=1 is already calculated, so ignore it.
6055     if (i.isScalar())
6056       continue;
6057 
6058     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
6059     VectorizationFactor Candidate(i, C.first);
6060     LLVM_DEBUG(
6061         dbgs() << "LV: Vector loop of width " << i << " costs: "
6062                << (Candidate.Cost / Candidate.Width.getKnownMinValue())
6063                << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "")
6064                << ".\n");
6065 
6066     if (!C.second && !ForceVectorization) {
6067       LLVM_DEBUG(
6068           dbgs() << "LV: Not considering vector loop of width " << i
6069                  << " because it will not generate any vector instructions.\n");
6070       continue;
6071     }
6072 
    // If profitable, add it to the ProfitableVFs list.
6074     if (isMoreProfitable(Candidate, ScalarCost))
6075       ProfitableVFs.push_back(Candidate);
6076 
6077     if (isMoreProfitable(Candidate, ChosenFactor))
6078       ChosenFactor = Candidate;
6079   }
6080 
6081   // Emit a report of VFs with invalid costs in the loop.
6082   if (!InvalidCosts.empty()) {
6083     // Group the remarks per instruction, keeping the instruction order from
6084     // InvalidCosts.
6085     std::map<Instruction *, unsigned> Numbering;
6086     unsigned I = 0;
6087     for (auto &Pair : InvalidCosts)
6088       if (!Numbering.count(Pair.first))
6089         Numbering[Pair.first] = I++;
6090 
6091     // Sort the list, first on instruction(number) then on VF.
6092     llvm::sort(InvalidCosts,
6093                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
6094                  if (Numbering[A.first] != Numbering[B.first])
6095                    return Numbering[A.first] < Numbering[B.first];
6096                  ElementCountComparator ECC;
6097                  return ECC(A.second, B.second);
6098                });
6099 
6100     // For a list of ordered instruction-vf pairs:
6101     //   [(load, vf1), (load, vf2), (store, vf1)]
6102     // Group the instructions together to emit separate remarks for:
6103     //   load  (vf1, vf2)
6104     //   store (vf1)
6105     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
6106     auto Subset = ArrayRef<InstructionVFPair>();
6107     do {
6108       if (Subset.empty())
6109         Subset = Tail.take_front(1);
6110 
6111       Instruction *I = Subset.front().first;
6112 
6113       // If the next instruction is different, or if there are no other pairs,
6114       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
6118       if (Subset == Tail || Tail[Subset.size()].first != I) {
6119         std::string OutString;
6120         raw_string_ostream OS(OutString);
6121         assert(!Subset.empty() && "Unexpected empty range");
6122         OS << "Instruction with invalid costs prevented vectorization at VF=(";
6123         for (auto &Pair : Subset)
6124           OS << (Pair.second == Subset.front().second ? "" : ", ")
6125              << Pair.second;
6126         OS << "):";
6127         if (auto *CI = dyn_cast<CallInst>(I))
6128           OS << " call to " << CI->getCalledFunction()->getName();
6129         else
6130           OS << " " << I->getOpcodeName();
6131         OS.flush();
6132         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
6133         Tail = Tail.drop_front(Subset.size());
6134         Subset = {};
6135       } else
6136         // Grow the subset by one element
6137         Subset = Tail.take_front(Subset.size() + 1);
6138     } while (!Tail.empty());
6139   }
6140 
6141   if (!EnableCondStoresVectorization && NumPredStores) {
6142     reportVectorizationFailure("There are conditional stores.",
6143         "store that is conditionally executed prevents vectorization",
6144         "ConditionalStore", ORE, TheLoop);
6145     ChosenFactor = ScalarCost;
6146   }
6147 
6148   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
6149                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
6150              << "LV: Vectorization seems to be not beneficial, "
6151              << "but was forced by a user.\n");
6152   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6153   return ChosenFactor;
6154 }
6155 
6156 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6157     const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions and first-order recurrences need
  // special handling and are currently unsupported.
6160   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6161         return Legal->isFirstOrderRecurrence(&Phi) ||
6162                Legal->isReductionVariable(&Phi);
6163       }))
6164     return false;
6165 
6166   // Phis with uses outside of the loop require special handling and are
6167   // currently unsupported.
6168   for (auto &Entry : Legal->getInductionVars()) {
6169     // Look for uses of the value of the induction at the last iteration.
6170     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6171     for (User *U : PostInc->users())
6172       if (!L.contains(cast<Instruction>(U)))
6173         return false;
6174     // Look for uses of penultimate value of the induction.
6175     for (User *U : Entry.first->users())
6176       if (!L.contains(cast<Instruction>(U)))
6177         return false;
6178   }
6179 
6180   // Induction variables that are widened require special handling that is
6181   // currently not supported.
6182   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6183         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6184                  this->isProfitableToScalarize(Entry.first, VF));
6185       }))
6186     return false;
6187 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
6191   if (L.getExitingBlock() != L.getLoopLatch())
6192     return false;
6193 
6194   return true;
6195 }
6196 
6197 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6198     const ElementCount VF) const {
6199   // FIXME: We need a much better cost-model to take different parameters such
6200   // as register pressure, code size increase and cost of extra branches into
6201   // account. For now we apply a very crude heuristic and only consider loops
6202   // with vectorization factors larger than a certain value.
6203   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
6205   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6206     return false;
6207   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
6208     return true;
6209   return false;
6210 }
6211 
6212 VectorizationFactor
6213 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6214     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6215   VectorizationFactor Result = VectorizationFactor::Disabled();
6216   if (!EnableEpilogueVectorization) {
6217     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6218     return Result;
6219   }
6220 
6221   if (!isScalarEpilogueAllowed()) {
6222     LLVM_DEBUG(
6223         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6224                   "allowed.\n";);
6225     return Result;
6226   }
6227 
6228   // FIXME: This can be fixed for scalable vectors later, because at this stage
6229   // the LoopVectorizer will only consider vectorizing a loop with scalable
6230   // vectors when the loop has a hint to enable vectorization for a given VF.
6231   if (MainLoopVF.isScalable()) {
6232     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6233                          "yet supported.\n");
6234     return Result;
6235   }
6236 
6237   // Not really a cost consideration, but check for unsupported cases here to
6238   // simplify the logic.
6239   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6240     LLVM_DEBUG(
6241         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6242                   "not a supported candidate.\n";);
6243     return Result;
6244   }
6245 
6246   if (EpilogueVectorizationForceVF > 1) {
6247     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6248     if (LVP.hasPlanWithVFs(
6249             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6250       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6251     else {
6252       LLVM_DEBUG(
6253           dbgs()
6254               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6255       return Result;
6256     }
6257   }
6258 
6259   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6260       TheLoop->getHeader()->getParent()->hasMinSize()) {
6261     LLVM_DEBUG(
6262         dbgs()
6263             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6264     return Result;
6265   }
6266 
6267   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6268     return Result;
6269 
6270   for (auto &NextVF : ProfitableVFs)
6271     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6272         (Result.Width.getFixedValue() == 1 ||
6273          isMoreProfitable(NextVF, Result)) &&
6274         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6275       Result = NextVF;
6276 
6277   if (Result != VectorizationFactor::Disabled())
6278     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6279                       << Result.Width.getFixedValue() << "\n";);
6280   return Result;
6281 }
6282 
6283 std::pair<unsigned, unsigned>
6284 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6285   unsigned MinWidth = -1U;
6286   unsigned MaxWidth = 8;
6287   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6288   for (Type *T : ElementTypesInLoop) {
6289     MinWidth = std::min<unsigned>(
6290         MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6291     MaxWidth = std::max<unsigned>(
6292         MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6293   }
6294   return {MinWidth, MaxWidth};
6295 }
6296 
6297 void LoopVectorizationCostModel::collectElementTypesForWidening() {
6298   ElementTypesInLoop.clear();
6299   // For each block.
6300   for (BasicBlock *BB : TheLoop->blocks()) {
6301     // For each instruction in the loop.
6302     for (Instruction &I : BB->instructionsWithoutDebug()) {
6303       Type *T = I.getType();
6304 
6305       // Skip ignored values.
6306       if (ValuesToIgnore.count(&I))
6307         continue;
6308 
6309       // Only examine Loads, Stores and PHINodes.
6310       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6311         continue;
6312 
6313       // Examine PHI nodes that are reduction variables. Update the type to
6314       // account for the recurrence type.
6315       if (auto *PN = dyn_cast<PHINode>(&I)) {
6316         if (!Legal->isReductionVariable(PN))
6317           continue;
6318         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6319         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6320             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6321                                       RdxDesc.getRecurrenceType(),
6322                                       TargetTransformInfo::ReductionFlags()))
6323           continue;
6324         T = RdxDesc.getRecurrenceType();
6325       }
6326 
6327       // Examine the stored values.
6328       if (auto *ST = dyn_cast<StoreInst>(&I))
6329         T = ST->getValueOperand()->getType();
6330 
6331       // Ignore loaded pointer types and stored pointer types that are not
6332       // vectorizable.
6333       //
6334       // FIXME: The check here attempts to predict whether a load or store will
6335       //        be vectorized. We only know this for certain after a VF has
6336       //        been selected. Here, we assume that if an access can be
6337       //        vectorized, it will be. We should also look at extending this
6338       //        optimization to non-pointer types.
6339       //
6340       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6341           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6342         continue;
6343 
6344       ElementTypesInLoop.insert(T);
6345     }
6346   }
6347 }
6348 
6349 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6350                                                            unsigned LoopCost) {
6351   // -- The interleave heuristics --
6352   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6353   // There are many micro-architectural considerations that we can't predict
6354   // at this level. For example, frontend pressure (on decode or fetch) due to
6355   // code size, or the number and capabilities of the execution ports.
6356   //
6357   // We use the following heuristics to select the interleave count:
6358   // 1. If the code has reductions, then we interleave to break the cross
6359   // iteration dependency.
6360   // 2. If the loop is really small, then we interleave to reduce the loop
6361   // overhead.
6362   // 3. We don't interleave if we think that we will spill registers to memory
6363   // due to the increased register pressure.
6364 
6365   if (!isScalarEpilogueAllowed())
6366     return 1;
6367 
  // The maximum safe dependence distance was already used when choosing the
  // VF, so do not interleave.
6369   if (Legal->getMaxSafeDepDistBytes() != -1U)
6370     return 1;
6371 
6372   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6373   const bool HasReductions = !Legal->getReductionVars().empty();
6374   // Do not interleave loops with a relatively small known or estimated trip
6375   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6377   // because with the above conditions interleaving can expose ILP and break
6378   // cross iteration dependences for reductions.
6379   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6380       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6381     return 1;
6382 
6383   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so clamp each register-class usage to at
  // least one register to avoid dividing by zero.
6386   for (auto& pair : R.MaxLocalUsers) {
6387     pair.second = std::max(pair.second, 1U);
6388   }
6389 
6390   // We calculate the interleave count using the following formula.
6391   // Subtract the number of loop invariants from the number of available
6392   // registers. These registers are used by all of the interleaved instances.
6393   // Next, divide the remaining registers by the number of registers that is
6394   // required by the loop, in order to estimate how many parallel instances
6395   // fit without causing spills. All of this is rounded down if necessary to be
6396   // a power of two. We want power of two interleave count to simplify any
6397   // addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps around to zero when the tail is folded
  // by masking; this currently happens when optimizing for size, in which case
  // IC is set to 1 above.
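  // For example, with 32 registers in a class, 2 of which hold loop-invariant
  // values, and a maximum local usage of 6 registers, this gives
  // PowerOf2Floor((32 - 2) / 6) = 4 interleaved instances (before the
  // induction variable adjustment below).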
6401   unsigned IC = UINT_MAX;
6402 
6403   for (auto& pair : R.MaxLocalUsers) {
6404     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6405     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6406                       << " registers of "
6407                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6408     if (VF.isScalar()) {
6409       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6410         TargetNumRegisters = ForceTargetNumScalarRegs;
6411     } else {
6412       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6413         TargetNumRegisters = ForceTargetNumVectorRegs;
6414     }
6415     unsigned MaxLocalUsers = pair.second;
6416     unsigned LoopInvariantRegs = 0;
6417     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6418       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6419 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
6421     // Don't count the induction variable as interleaved.
6422     if (EnableIndVarRegisterHeur) {
6423       TmpIC =
6424           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6425                         std::max(1U, (MaxLocalUsers - 1)));
6426     }
6427 
6428     IC = std::min(IC, TmpIC);
6429   }
6430 
6431   // Clamp the interleave ranges to reasonable counts.
6432   unsigned MaxInterleaveCount =
6433       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6434 
6435   // Check if the user has overridden the max.
6436   if (VF.isScalar()) {
6437     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6438       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6439   } else {
6440     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6441       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6442   }
6443 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, making sure the
  // result is at least 1.
6447   //
6448   // For scalable vectors we can't know if interleaving is beneficial. It may
6449   // not be beneficial for small loops if none of the lanes in the second vector
  // iteration is enabled. However, for larger loops, there is likely to be a
6451   // similar benefit as for fixed-width vectors. For now, we choose to leave
6452   // the InterleaveCount as if vscale is '1', although if some information about
6453   // the vector is known (e.g. min vector size), we can make a better decision.
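  // For example, with a best known trip count of 20 and a known minimum VF of
  // 4, the maximum interleave count becomes min(20 / 4, MaxInterleaveCount),
  // i.e. at most 5.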
6454   if (BestKnownTC) {
6455     MaxInterleaveCount =
6456         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6457     // Make sure MaxInterleaveCount is greater than 0.
6458     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6459   }
6460 
6461   assert(MaxInterleaveCount > 0 &&
6462          "Maximum interleave count must be greater than 0");
6463 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6466   if (IC > MaxInterleaveCount)
6467     IC = MaxInterleaveCount;
6468   else
6469     // Make sure IC is greater than 0.
6470     IC = std::max(1u, IC);
6471 
6472   assert(IC > 0 && "Interleave count must be greater than 0.");
6473 
6474   // If we did not calculate the cost for VF (because the user selected the VF)
6475   // then we calculate the cost of VF here.
6476   if (LoopCost == 0) {
6477     InstructionCost C = expectedCost(VF).first;
6478     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6479     LoopCost = *C.getValue();
6480   }
6481 
6482   assert(LoopCost && "Non-zero loop cost expected");
6483 
6484   // Interleave if we vectorized this loop and there is a reduction that could
6485   // benefit from interleaving.
6486   if (VF.isVector() && HasReductions) {
6487     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6488     return IC;
6489   }
6490 
6491   // Note that if we've already vectorized the loop we will have done the
6492   // runtime check and so interleaving won't require further checks.
6493   bool InterleavingRequiresRuntimePointerCheck =
6494       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6495 
6496   // We want to interleave small loops in order to reduce the loop overhead and
6497   // potentially expose ILP opportunities.
6498   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6499                     << "LV: IC is " << IC << '\n'
6500                     << "LV: VF is " << VF << '\n');
6501   const bool AggressivelyInterleaveReductions =
6502       TTI.enableAggressiveInterleaving(HasReductions);
6503   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead is 1, and we use the cost model to
    // estimate the cost of the loop body; we interleave until the loop
    // overhead is about 5% of the total cost of the loop.
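    // For example, if SmallLoopCost were 20 and the loop body cost were 6,
    // this would give SmallIC = min(IC, PowerOf2Floor(20 / 6)) = min(IC, 2).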
6507     unsigned SmallIC =
6508         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6509 
6510     // Interleave until store/load ports (estimated by max interleave count) are
6511     // saturated.
6512     unsigned NumStores = Legal->getNumStores();
6513     unsigned NumLoads = Legal->getNumLoads();
6514     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6515     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6516 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), interleaving can increase the critical path length when
    // the loop we're interleaving is nested inside another loop. For tree-wise
    // reductions set the limit to 2, and for ordered reductions it's best to
    // disable interleaving entirely.
6522     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6523       bool HasOrderedReductions =
6524           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6525             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6526             return RdxDesc.isOrdered();
6527           });
6528       if (HasOrderedReductions) {
6529         LLVM_DEBUG(
6530             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6531         return 1;
6532       }
6533 
6534       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6535       SmallIC = std::min(SmallIC, F);
6536       StoresIC = std::min(StoresIC, F);
6537       LoadsIC = std::min(LoadsIC, F);
6538     }
6539 
6540     if (EnableLoadStoreRuntimeInterleave &&
6541         std::max(StoresIC, LoadsIC) > SmallIC) {
6542       LLVM_DEBUG(
6543           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6544       return std::max(StoresIC, LoadsIC);
6545     }
6546 
6547     // If there are scalar reductions and TTI has enabled aggressive
6548     // interleaving for reductions, we will interleave to expose ILP.
6549     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6550         AggressivelyInterleaveReductions) {
6551       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6552       // Interleave no less than SmallIC but not as aggressive as the normal IC
6553       // to satisfy the rare situation when resources are too limited.
6554       return std::max(IC / 2, SmallIC);
6555     } else {
6556       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6557       return SmallIC;
6558     }
6559   }
6560 
6561   // Interleave if this is a large loop (small loops are already dealt with by
6562   // this point) that could benefit from interleaving.
6563   if (AggressivelyInterleaveReductions) {
6564     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6565     return IC;
6566   }
6567 
6568   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6569   return 1;
6570 }
6571 
6572 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6573 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6574   // This function calculates the register usage by measuring the highest number
6575   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
6577   // assign a number to each instruction. We use RPO to ensure that defs are
6578   // met before their users. We assume that each instruction that has in-loop
6579   // users starts an interval. We record every time that an in-loop value is
6580   // used, so we have a list of the first and last occurrences of each
6581   // instruction. Next, we transpose this data structure into a multi map that
6582   // holds the list of intervals that *end* at a specific location. This multi
6583   // map allows us to perform a linear search. We scan the instructions linearly
6584   // and record each time that a new interval starts, by placing it in a set.
6585   // If we find this value in the multi-map then we remove it from the set.
6586   // The max register usage is the maximum size of the set.
6587   // We also search for instructions that are defined outside the loop, but are
6588   // used inside the loop. We need this number separately from the max-interval
6589   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
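  // For example, in a loop body like %a = load ...; %b = add %a, %inv;
  // store %b, ..., the interval for %a opens at its definition and ends at the
  // add, %b's interval ends at the store, and %inv, being defined outside the
  // loop, is only counted towards the loop-invariant usage.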
6591   LoopBlocksDFS DFS(TheLoop);
6592   DFS.perform(LI);
6593 
6594   RegisterUsage RU;
6595 
6596   // Each 'key' in the map opens a new interval. The values
6597   // of the map are the index of the 'last seen' usage of the
6598   // instruction that is the key.
6599   using IntervalMap = DenseMap<Instruction *, unsigned>;
6600 
6601   // Maps instruction to its index.
6602   SmallVector<Instruction *, 64> IdxToInstr;
6603   // Marks the end of each interval.
6604   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
6606   SmallPtrSet<Instruction *, 8> Ends;
6607   // Saves the list of values that are used in the loop but are
6608   // defined outside the loop, such as arguments and constants.
6609   SmallPtrSet<Value *, 8> LoopInvariants;
6610 
6611   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6612     for (Instruction &I : BB->instructionsWithoutDebug()) {
6613       IdxToInstr.push_back(&I);
6614 
6615       // Save the end location of each USE.
6616       for (Value *U : I.operands()) {
6617         auto *Instr = dyn_cast<Instruction>(U);
6618 
6619         // Ignore non-instruction values such as arguments, constants, etc.
6620         if (!Instr)
6621           continue;
6622 
6623         // If this instruction is outside the loop then record it and continue.
6624         if (!TheLoop->contains(Instr)) {
6625           LoopInvariants.insert(Instr);
6626           continue;
6627         }
6628 
6629         // Overwrite previous end points.
6630         EndPoint[Instr] = IdxToInstr.size();
6631         Ends.insert(Instr);
6632       }
6633     }
6634   }
6635 
6636   // Saves the list of intervals that end with the index in 'key'.
6637   using InstrList = SmallVector<Instruction *, 2>;
6638   DenseMap<unsigned, InstrList> TransposeEnds;
6639 
6640   // Transpose the EndPoints to a list of values that end at each index.
6641   for (auto &Interval : EndPoint)
6642     TransposeEnds[Interval.second].push_back(Interval.first);
6643 
6644   SmallPtrSet<Instruction *, 8> OpenIntervals;
6645   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6646   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6647 
6648   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6649 
6650   // A lambda that gets the register usage for the given type and VF.
6651   const auto &TTICapture = TTI;
6652   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6653     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6654       return 0;
6655     InstructionCost::CostType RegUsage =
6656         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6657     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6658            "Nonsensical values for register usage.");
6659     return RegUsage;
6660   };
6661 
6662   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6663     Instruction *I = IdxToInstr[i];
6664 
6665     // Remove all of the instructions that end at this location.
6666     InstrList &List = TransposeEnds[i];
6667     for (Instruction *ToRemove : List)
6668       OpenIntervals.erase(ToRemove);
6669 
6670     // Ignore instructions that are never used within the loop.
6671     if (!Ends.count(I))
6672       continue;
6673 
6674     // Skip ignored values.
6675     if (ValuesToIgnore.count(I))
6676       continue;
6677 
6678     // For each VF find the maximum usage of registers.
6679     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6680       // Count the number of live intervals.
6681       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6682 
6683       if (VFs[j].isScalar()) {
6684         for (auto Inst : OpenIntervals) {
6685           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6686           if (RegUsage.find(ClassID) == RegUsage.end())
6687             RegUsage[ClassID] = 1;
6688           else
6689             RegUsage[ClassID] += 1;
6690         }
6691       } else {
6692         collectUniformsAndScalars(VFs[j]);
6693         for (auto Inst : OpenIntervals) {
6694           // Skip ignored values for VF > 1.
6695           if (VecValuesToIgnore.count(Inst))
6696             continue;
6697           if (isScalarAfterVectorization(Inst, VFs[j])) {
6698             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6699             if (RegUsage.find(ClassID) == RegUsage.end())
6700               RegUsage[ClassID] = 1;
6701             else
6702               RegUsage[ClassID] += 1;
6703           } else {
6704             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6705             if (RegUsage.find(ClassID) == RegUsage.end())
6706               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6707             else
6708               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6709           }
6710         }
6711       }
6712 
6713       for (auto& pair : RegUsage) {
6714         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6715           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6716         else
6717           MaxUsages[j][pair.first] = pair.second;
6718       }
6719     }
6720 
6721     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6722                       << OpenIntervals.size() << '\n');
6723 
6724     // Add the current instruction to the list of open intervals.
6725     OpenIntervals.insert(I);
6726   }
6727 
6728   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6729     SmallMapVector<unsigned, unsigned, 4> Invariant;
6730 
6731     for (auto Inst : LoopInvariants) {
6732       unsigned Usage =
6733           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6734       unsigned ClassID =
6735           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6736       if (Invariant.find(ClassID) == Invariant.end())
6737         Invariant[ClassID] = Usage;
6738       else
6739         Invariant[ClassID] += Usage;
6740     }
6741 
6742     LLVM_DEBUG({
6743       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6744       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6745              << " item\n";
6746       for (const auto &pair : MaxUsages[i]) {
6747         dbgs() << "LV(REG): RegisterClass: "
6748                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6749                << " registers\n";
6750       }
6751       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6752              << " item\n";
6753       for (const auto &pair : Invariant) {
6754         dbgs() << "LV(REG): RegisterClass: "
6755                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6756                << " registers\n";
6757       }
6758     });
6759 
6760     RU.LoopInvariantRegs = Invariant;
6761     RU.MaxLocalUsers = MaxUsages[i];
6762     RUs[i] = RU;
6763   }
6764 
6765   return RUs;
6766 }
6767 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6769   // TODO: Cost model for emulated masked load/store is completely
6770   // broken. This hack guides the cost model to use an artificially
6771   // high enough value to practically disable vectorization with such
6772   // operations, except where previously deployed legality hack allowed
6773   // using very low cost values. This is to avoid regressions coming simply
6774   // from moving "masked load/store" check from legality to cost model.
6775   // Masked Load/Gather emulation was previously never allowed.
6776   // Limited number of Masked Store/Scatter emulation was allowed.
6777   assert(isPredicatedInst(I) &&
6778          "Expecting a scalar emulated instruction");
6779   return isa<LoadInst>(I) ||
6780          (isa<StoreInst>(I) &&
6781           NumPredStores > NumberOfStoresToPredicate);
6782 }
6783 
6784 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6785   // If we aren't vectorizing the loop, or if we've already collected the
6786   // instructions to scalarize, there's nothing to do. Collection may already
6787   // have occurred if we have a user-selected VF and are now computing the
6788   // expected cost for interleaving.
6789   if (VF.isScalar() || VF.isZero() ||
6790       InstsToScalarize.find(VF) != InstsToScalarize.end())
6791     return;
6792 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6794   // not profitable to scalarize any instructions, the presence of VF in the
6795   // map will indicate that we've analyzed it already.
6796   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6797 
6798   // Find all the instructions that are scalar with predication in the loop and
6799   // determine if it would be better to not if-convert the blocks they are in.
6800   // If so, we also record the instructions to scalarize.
6801   for (BasicBlock *BB : TheLoop->blocks()) {
6802     if (!blockNeedsPredication(BB))
6803       continue;
6804     for (Instruction &I : *BB)
6805       if (isScalarWithPredication(&I)) {
6806         ScalarCostsTy ScalarCosts;
6807         // Do not apply discount if scalable, because that would lead to
6808         // invalid scalarization costs.
6809         // Do not apply discount logic if hacked cost is needed
6810         // for emulated masked memrefs.
6811         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
6812             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6813           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6814         // Remember that BB will remain after vectorization.
6815         PredicatedBBsAfterVectorization.insert(BB);
6816       }
6817   }
6818 }
6819 
6820 int LoopVectorizationCostModel::computePredInstDiscount(
6821     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6822   assert(!isUniformAfterVectorization(PredInst, VF) &&
6823          "Instruction marked uniform-after-vectorization will be predicated");
6824 
6825   // Initialize the discount to zero, meaning that the scalar version and the
6826   // vector version cost the same.
6827   InstructionCost Discount = 0;
6828 
6829   // Holds instructions to analyze. The instructions we visit are mapped in
6830   // ScalarCosts. Those instructions are the ones that would be scalarized if
6831   // we find that the scalar version costs less.
6832   SmallVector<Instruction *, 8> Worklist;
6833 
6834   // Returns true if the given instruction can be scalarized.
6835   auto canBeScalarized = [&](Instruction *I) -> bool {
6836     // We only attempt to scalarize instructions forming a single-use chain
6837     // from the original predicated block that would otherwise be vectorized.
6838     // Although not strictly necessary, we give up on instructions we know will
6839     // already be scalar to avoid traversing chains that are unlikely to be
6840     // beneficial.
6841     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6842         isScalarAfterVectorization(I, VF))
6843       return false;
6844 
6845     // If the instruction is scalar with predication, it will be analyzed
6846     // separately. We ignore it within the context of PredInst.
6847     if (isScalarWithPredication(I))
6848       return false;
6849 
6850     // If any of the instruction's operands are uniform after vectorization,
6851     // the instruction cannot be scalarized. This prevents, for example, a
6852     // masked load from being scalarized.
6853     //
6854     // We assume we will only emit a value for lane zero of an instruction
6855     // marked uniform after vectorization, rather than VF identical values.
6856     // Thus, if we scalarize an instruction that uses a uniform, we would
6857     // create uses of values corresponding to the lanes we aren't emitting code
6858     // for. This behavior can be changed by allowing getScalarValue to clone
6859     // the lane zero values for uniforms rather than asserting.
6860     for (Use &U : I->operands())
6861       if (auto *J = dyn_cast<Instruction>(U.get()))
6862         if (isUniformAfterVectorization(J, VF))
6863           return false;
6864 
6865     // Otherwise, we can scalarize the instruction.
6866     return true;
6867   };
6868 
6869   // Compute the expected cost discount from scalarizing the entire expression
6870   // feeding the predicated instruction. We currently only consider expressions
6871   // that are single-use instruction chains.
6872   Worklist.push_back(PredInst);
6873   while (!Worklist.empty()) {
6874     Instruction *I = Worklist.pop_back_val();
6875 
6876     // If we've already analyzed the instruction, there's nothing to do.
6877     if (ScalarCosts.find(I) != ScalarCosts.end())
6878       continue;
6879 
6880     // Compute the cost of the vector instruction. Note that this cost already
6881     // includes the scalarization overhead of the predicated instruction.
6882     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6883 
6884     // Compute the cost of the scalarized instruction. This cost is the cost of
6885     // the instruction as if it wasn't if-converted and instead remained in the
6886     // predicated block. We will scale this cost by block probability after
6887     // computing the scalarization overhead.
6888     InstructionCost ScalarCost =
6889         VF.getFixedValue() *
6890         getInstructionCost(I, ElementCount::getFixed(1)).first;
6891 
6892     // Compute the scalarization overhead of needed insertelement instructions
6893     // and phi nodes.
6894     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6895       ScalarCost += TTI.getScalarizationOverhead(
6896           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6897           APInt::getAllOnesValue(VF.getFixedValue()), true, false);
6898       ScalarCost +=
6899           VF.getFixedValue() *
6900           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6901     }
6902 
6903     // Compute the scalarization overhead of needed extractelement
6904     // instructions. For each of the instruction's operands, if the operand can
6905     // be scalarized, add it to the worklist; otherwise, account for the
6906     // overhead.
6907     for (Use &U : I->operands())
6908       if (auto *J = dyn_cast<Instruction>(U.get())) {
6909         assert(VectorType::isValidElementType(J->getType()) &&
6910                "Instruction has non-scalar type");
6911         if (canBeScalarized(J))
6912           Worklist.push_back(J);
6913         else if (needsExtract(J, VF)) {
6914           ScalarCost += TTI.getScalarizationOverhead(
6915               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6916               APInt::getAllOnesValue(VF.getFixedValue()), false, true);
6917         }
6918       }
6919 
6920     // Scale the total scalar cost by block probability.
6921     ScalarCost /= getReciprocalPredBlockProb();
6922 
6923     // Compute the discount. A non-negative discount means the vector version
6924     // of the instruction costs more, and scalarizing would be beneficial.
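    // For example, if the vector form of an instruction costs 12 and its
    // scalarized form (after scaling by block probability) costs 8, the
    // discount grows by 4, favoring scalarization.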
6925     Discount += VectorCost - ScalarCost;
6926     ScalarCosts[I] = ScalarCost;
6927   }
6928 
6929   return *Discount.getValue();
6930 }
6931 
6932 LoopVectorizationCostModel::VectorizationCostTy
6933 LoopVectorizationCostModel::expectedCost(
6934     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6935   VectorizationCostTy Cost;
6936 
6937   // For each block.
6938   for (BasicBlock *BB : TheLoop->blocks()) {
6939     VectorizationCostTy BlockCost;
6940 
6941     // For each instruction in the old loop.
6942     for (Instruction &I : BB->instructionsWithoutDebug()) {
6943       // Skip ignored values.
6944       if (ValuesToIgnore.count(&I) ||
6945           (VF.isVector() && VecValuesToIgnore.count(&I)))
6946         continue;
6947 
6948       VectorizationCostTy C = getInstructionCost(&I, VF);
6949 
6950       // Check if we should override the cost.
6951       if (C.first.isValid() &&
6952           ForceTargetInstructionCost.getNumOccurrences() > 0)
6953         C.first = InstructionCost(ForceTargetInstructionCost);
6954 
6955       // Keep a list of instructions with invalid costs.
6956       if (Invalid && !C.first.isValid())
6957         Invalid->emplace_back(&I, VF);
6958 
6959       BlockCost.first += C.first;
6960       BlockCost.second |= C.second;
6961       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6962                         << " for VF " << VF << " For instruction: " << I
6963                         << '\n');
6964     }
6965 
6966     // If we are vectorizing a predicated block, it will have been
6967     // if-converted. This means that the block's instructions (aside from
6968     // stores and instructions that may divide by zero) will now be
6969     // unconditionally executed. For the scalar case, we may not always execute
6970     // the predicated block, if it is an if-else block. Thus, scale the block's
6971     // cost by the probability of executing it. blockNeedsPredication from
6972     // Legal is used so as to not include all blocks in tail folded loops.
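    // For example, assuming a reciprocal predicated-block probability of 2
    // (i.e. the block is expected to execute on roughly every other
    // iteration), a predicated block whose scalar cost is 8 contributes 4.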
6973     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6974       BlockCost.first /= getReciprocalPredBlockProb();
6975 
6976     Cost.first += BlockCost.first;
6977     Cost.second |= BlockCost.second;
6978   }
6979 
6980   return Cost;
6981 }
6982 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6985 ///
6986 /// This SCEV can be sent to the Target in order to estimate the address
6987 /// calculation cost.
6988 static const SCEV *getAddressAccessSCEV(
6989               Value *Ptr,
6990               LoopVectorizationLegality *Legal,
6991               PredicatedScalarEvolution &PSE,
6992               const Loop *TheLoop) {
6993 
6994   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6995   if (!Gep)
6996     return nullptr;
6997 
6998   // We are looking for a gep with all loop invariant indices except for one
6999   // which should be an induction variable.
7000   auto SE = PSE.getSE();
7001   unsigned NumOperands = Gep->getNumOperands();
7002   for (unsigned i = 1; i < NumOperands; ++i) {
7003     Value *Opd = Gep->getOperand(i);
7004     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
7005         !Legal->isInductionVariable(Opd))
7006       return nullptr;
7007   }
7008 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
7010   return PSE.getSCEV(Ptr);
7011 }
7012 
7013 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
7014   return Legal->hasStride(I->getOperand(0)) ||
7015          Legal->hasStride(I->getOperand(1));
7016 }
7017 
7018 InstructionCost
7019 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
7020                                                         ElementCount VF) {
7021   assert(VF.isVector() &&
7022          "Scalarization cost of instruction implies vectorization.");
7023   if (VF.isScalable())
7024     return InstructionCost::getInvalid();
7025 
7026   Type *ValTy = getLoadStoreType(I);
7027   auto SE = PSE.getSE();
7028 
7029   unsigned AS = getLoadStoreAddressSpace(I);
7030   Value *Ptr = getLoadStorePointerOperand(I);
7031   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
7032 
  // Figure out whether the access is strided and get the stride value if it is
  // known at compile time.
7035   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
7036 
7037   // Get the cost of the scalar memory instruction and address computation.
7038   InstructionCost Cost =
7039       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
7040 
7041   // Don't pass *I here, since it is scalar but will actually be part of a
7042   // vectorized loop where the user of it is a vectorized instruction.
7043   const Align Alignment = getLoadStoreAlignment(I);
7044   Cost += VF.getKnownMinValue() *
7045           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
7046                               AS, TTI::TCK_RecipThroughput);
7047 
7048   // Get the overhead of the extractelement and insertelement instructions
7049   // we might create due to scalarization.
7050   Cost += getScalarizationOverhead(I, VF);
7051 
7052   // If we have a predicated load/store, it will need extra i1 extracts and
7053   // conditional branches, but may not be executed for each vector lane. Scale
7054   // the cost by the probability of executing the predicated block.
7055   if (isPredicatedInst(I)) {
7056     Cost /= getReciprocalPredBlockProb();
7057 
7058     // Add the cost of an i1 extract and a branch
7059     auto *Vec_i1Ty =
7060         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
7061     Cost += TTI.getScalarizationOverhead(
7062         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7063         /*Insert=*/false, /*Extract=*/true);
7064     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
7065 
7066     if (useEmulatedMaskMemRefHack(I))
7067       // Artificially setting to a high enough value to practically disable
7068       // vectorization with such operations.
7069       Cost = 3000000;
7070   }
7071 
7072   return Cost;
7073 }
7074 
7075 InstructionCost
7076 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
7077                                                     ElementCount VF) {
7078   Type *ValTy = getLoadStoreType(I);
7079   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7080   Value *Ptr = getLoadStorePointerOperand(I);
7081   unsigned AS = getLoadStoreAddressSpace(I);
7082   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
7083   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7084 
7085   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7086          "Stride should be 1 or -1 for consecutive memory access");
7087   const Align Alignment = getLoadStoreAlignment(I);
7088   InstructionCost Cost = 0;
7089   if (Legal->isMaskRequired(I))
7090     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7091                                       CostKind);
7092   else
7093     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
7094                                 CostKind, I);
7095 
7096   bool Reverse = ConsecutiveStride < 0;
7097   if (Reverse)
7098     Cost +=
7099         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7100   return Cost;
7101 }
7102 
7103 InstructionCost
7104 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
7105                                                 ElementCount VF) {
7106   assert(Legal->isUniformMemOp(*I));
7107 
7108   Type *ValTy = getLoadStoreType(I);
7109   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7110   const Align Alignment = getLoadStoreAlignment(I);
7111   unsigned AS = getLoadStoreAddressSpace(I);
7112   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7113   if (isa<LoadInst>(I)) {
7114     return TTI.getAddressComputationCost(ValTy) +
7115            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
7116                                CostKind) +
7117            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
7118   }
7119   StoreInst *SI = cast<StoreInst>(I);
7120 
7121   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
7122   return TTI.getAddressComputationCost(ValTy) +
7123          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
7124                              CostKind) +
7125          (isLoopInvariantStoreValue
7126               ? 0
7127               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7128                                        VF.getKnownMinValue() - 1));
7129 }
7130 
7131 InstructionCost
7132 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7133                                                  ElementCount VF) {
7134   Type *ValTy = getLoadStoreType(I);
7135   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7136   const Align Alignment = getLoadStoreAlignment(I);
7137   const Value *Ptr = getLoadStorePointerOperand(I);
7138 
7139   return TTI.getAddressComputationCost(VectorTy) +
7140          TTI.getGatherScatterOpCost(
7141              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7142              TargetTransformInfo::TCK_RecipThroughput, I);
7143 }
7144 
7145 InstructionCost
7146 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7147                                                    ElementCount VF) {
7148   // TODO: Once we have support for interleaving with scalable vectors
7149   // we can calculate the cost properly here.
7150   if (VF.isScalable())
7151     return InstructionCost::getInvalid();
7152 
7153   Type *ValTy = getLoadStoreType(I);
7154   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7155   unsigned AS = getLoadStoreAddressSpace(I);
7156 
7157   auto Group = getInterleavedAccessGroup(I);
7158   assert(Group && "Fail to get an interleaved access group.");
7159 
7160   unsigned InterleaveFactor = Group->getFactor();
7161   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7162 
7163   // Holds the indices of existing members in the interleaved group.
7164   SmallVector<unsigned, 4> Indices;
7165   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
7166     if (Group->getMember(IF))
7167       Indices.push_back(IF);
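  // For example, an interleave group with factor 4 whose only present members
  // are at indices 0 and 2 yields Indices = {0, 2}; the missing members at
  // indices 1 and 3 are gaps in the group.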
7168 
7169   // Calculate the cost of the whole interleaved group.
7170   bool UseMaskForGaps =
7171       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
7172       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
7173   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7174       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7175       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7176 
7177   if (Group->isReverse()) {
7178     // TODO: Add support for reversed masked interleaved access.
7179     assert(!Legal->isMaskRequired(I) &&
7180            "Reverse masked interleaved access not supported.");
7181     Cost +=
7182         Group->getNumMembers() *
7183         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7184   }
7185   return Cost;
7186 }
7187 
7188 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
7189     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
7190   using namespace llvm::PatternMatch;
7191   // Early exit for no inloop reductions
7192   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7193     return None;
7194   auto *VectorTy = cast<VectorType>(Ty);
7195 
  // We are looking for one of the following patterns, and for its minimal
  // acceptable cost:
7197   //  reduce(mul(ext(A), ext(B))) or
7198   //  reduce(mul(A, B)) or
7199   //  reduce(ext(A)) or
7200   //  reduce(A).
  // The basic idea is to walk down the use chain, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost of
  // the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return None, indicating that the original cost modelling
  // should be used instead.
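  // As an illustration (hypothetical IR, not taken from any particular test),
  // the reduce(mul(ext(A), ext(B))) form could look like:
  //   %a.ext = sext <16 x i8> %a to <16 x i32>
  //   %b.ext = sext <16 x i8> %b to <16 x i32>
  //   %mul   = mul <16 x i32> %a.ext, %b.ext
  //   %sum   = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %mul)
  // which some targets can cost as a single extended multiply-accumulate
  // reduction.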
7208   Instruction *RetI = I;
7209   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
7210     if (!RetI->hasOneUser())
7211       return None;
7212     RetI = RetI->user_back();
7213   }
7214   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
7215       RetI->user_back()->getOpcode() == Instruction::Add) {
7216     if (!RetI->hasOneUser())
7217       return None;
7218     RetI = RetI->user_back();
7219   }
7220 
  // If the found instruction is not part of a reduction chain, return None so
  // that the parent falls back to the original cost modelling.
7223   if (!InLoopReductionImmediateChains.count(RetI))
7224     return None;
7225 
7226   // Find the reduction this chain is a part of and calculate the basic cost of
7227   // the reduction on its own.
7228   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7229   Instruction *ReductionPhi = LastChain;
7230   while (!isa<PHINode>(ReductionPhi))
7231     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7232 
7233   const RecurrenceDescriptor &RdxDesc =
7234       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7235 
7236   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7237       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
7238 
7239   // If we're using ordered reductions then we can just return the base cost
7240   // here, since getArithmeticReductionCost calculates the full ordered
7241   // reduction cost when FP reassociation is not allowed.
7242   if (useOrderedReductions(RdxDesc))
7243     return BaseCost;
7244 
  // Get the operand that is not part of the reduction chain and match it
  // against one of the patterns above, returning the better cost if one is
  // found.
7247   Instruction *RedOp = RetI->getOperand(1) == LastChain
7248                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7249                            : dyn_cast<Instruction>(RetI->getOperand(1));
7250 
7251   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7252 
7253   Instruction *Op0, *Op1;
7254   if (RedOp &&
7255       match(RedOp,
7256             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
7257       match(Op0, m_ZExtOrSExt(m_Value())) &&
7258       Op0->getOpcode() == Op1->getOpcode() &&
7259       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7260       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
7261       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
7262 
    // Matched reduce(ext(mul(ext(A), ext(B))))
7264     // Note that the extend opcodes need to all match, or if A==B they will have
7265     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
7266     // which is equally fine.
7267     bool IsUnsigned = isa<ZExtInst>(Op0);
7268     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7269     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
7270 
7271     InstructionCost ExtCost =
7272         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
7273                              TTI::CastContextHint::None, CostKind, Op0);
7274     InstructionCost MulCost =
7275         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
7276     InstructionCost Ext2Cost =
7277         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
7278                              TTI::CastContextHint::None, CostKind, RedOp);
7279 
7280     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7281         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7282         CostKind);
7283 
7284     if (RedCost.isValid() &&
7285         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
7286       return I == RetI ? RedCost : 0;
7287   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
7288              !TheLoop->isLoopInvariant(RedOp)) {
7289     // Matched reduce(ext(A))
7290     bool IsUnsigned = isa<ZExtInst>(RedOp);
7291     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7292     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7293         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7294         CostKind);
7295 
7296     InstructionCost ExtCost =
7297         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7298                              TTI::CastContextHint::None, CostKind, RedOp);
7299     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7300       return I == RetI ? RedCost : 0;
7301   } else if (RedOp &&
7302              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
7303     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
7304         Op0->getOpcode() == Op1->getOpcode() &&
7305         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7306         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7307       bool IsUnsigned = isa<ZExtInst>(Op0);
7308       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7309       // Matched reduce(mul(ext, ext))
7310       InstructionCost ExtCost =
7311           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7312                                TTI::CastContextHint::None, CostKind, Op0);
7313       InstructionCost MulCost =
7314           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7315 
7316       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7317           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7318           CostKind);
7319 
7320       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7321         return I == RetI ? RedCost : 0;
7322     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7323       // Matched reduce(mul())
7324       InstructionCost MulCost =
7325           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7326 
7327       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7328           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7329           CostKind);
7330 
7331       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7332         return I == RetI ? RedCost : 0;
7333     }
7334   }
7335 
7336   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7337 }
7338 
7339 InstructionCost
7340 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7341                                                      ElementCount VF) {
7342   // Calculate scalar cost only. Vectorization cost should be ready at this
7343   // moment.
7344   if (VF.isScalar()) {
7345     Type *ValTy = getLoadStoreType(I);
7346     const Align Alignment = getLoadStoreAlignment(I);
7347     unsigned AS = getLoadStoreAddressSpace(I);
7348 
7349     return TTI.getAddressComputationCost(ValTy) +
7350            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7351                                TTI::TCK_RecipThroughput, I);
7352   }
7353   return getWideningCost(I, VF);
7354 }
7355 
7356 LoopVectorizationCostModel::VectorizationCostTy
7357 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7358                                                ElementCount VF) {
7359   // If we know that this instruction will remain uniform, check the cost of
7360   // the scalar version.
7361   if (isUniformAfterVectorization(I, VF))
7362     VF = ElementCount::getFixed(1);
7363 
7364   if (VF.isVector() && isProfitableToScalarize(I, VF))
7365     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7366 
7367   // Forced scalars do not have any scalarization overhead.
7368   auto ForcedScalar = ForcedScalars.find(VF);
7369   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7370     auto InstSet = ForcedScalar->second;
7371     if (InstSet.count(I))
7372       return VectorizationCostTy(
7373           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7374            VF.getKnownMinValue()),
7375           false);
7376   }
7377 
7378   Type *VectorTy;
7379   InstructionCost C = getInstructionCost(I, VF, VectorTy);
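  // The type is considered "not scalarized" when the target legalizes the
  // vector type into fewer parts than it has (known minimum) lanes; otherwise
  // the operation is effectively performed lane-by-lane.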
7380 
7381   bool TypeNotScalarized =
7382       VF.isVector() && VectorTy->isVectorTy() &&
7383       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7384   return VectorizationCostTy(C, TypeNotScalarized);
7385 }
7386 
7387 InstructionCost
7388 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7389                                                      ElementCount VF) const {
7390 
7391   // There is no mechanism yet to create a scalable scalarization loop,
7392   // so this is currently Invalid.
7393   if (VF.isScalable())
7394     return InstructionCost::getInvalid();
7395 
7396   if (VF.isScalar())
7397     return 0;
7398 
7399   InstructionCost Cost = 0;
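  // The overhead modelled here is the cost of building the vector result from
  // scalar values (insertelements) plus the cost of extracting the scalar
  // operands from their vector definitions (extractelements).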
7400   Type *RetTy = ToVectorTy(I->getType(), VF);
7401   if (!RetTy->isVoidTy() &&
7402       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7403     Cost += TTI.getScalarizationOverhead(
7404         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7405         true, false);
7406 
7407   // Some targets keep addresses scalar.
7408   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7409     return Cost;
7410 
7411   // Some targets support efficient element stores.
7412   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7413     return Cost;
7414 
7415   // Collect operands to consider.
7416   CallInst *CI = dyn_cast<CallInst>(I);
7417   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7418 
7419   // Skip operands that do not require extraction/scalarization and do not incur
7420   // any overhead.
7421   SmallVector<Type *> Tys;
7422   for (auto *V : filterExtractingOperands(Ops, VF))
7423     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7424   return Cost + TTI.getOperandsScalarizationOverhead(
7425                     filterExtractingOperands(Ops, VF), Tys);
7426 }
7427 
7428 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7429   if (VF.isScalar())
7430     return;
7431   NumPredStores = 0;
7432   for (BasicBlock *BB : TheLoop->blocks()) {
7433     // For each instruction in the old loop.
7434     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7436       if (!Ptr)
7437         continue;
7438 
7439       // TODO: We should generate better code and update the cost model for
7440       // predicated uniform stores. Today they are treated as any other
7441       // predicated store (see added test cases in
7442       // invariant-store-vectorization.ll).
7443       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7444         NumPredStores++;
7445 
7446       if (Legal->isUniformMemOp(I)) {
7447         // TODO: Avoid replicating loads and stores instead of
7448         // relying on instcombine to remove them.
7449         // Load: Scalar load + broadcast
7450         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7451         InstructionCost Cost;
7452         if (isa<StoreInst>(&I) && VF.isScalable() &&
7453             isLegalGatherOrScatter(&I)) {
7454           Cost = getGatherScatterCost(&I, VF);
7455           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7456         } else {
7457           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7458                  "Cannot yet scalarize uniform stores");
7459           Cost = getUniformMemOpCost(&I, VF);
7460           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7461         }
7462         continue;
7463       }
7464 
7465       // We assume that widening is the best solution when possible.
7466       if (memoryInstructionCanBeWidened(&I, VF)) {
7467         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7468         int ConsecutiveStride =
7469                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7470         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7471                "Expected consecutive stride.");
7472         InstWidening Decision =
7473             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7474         setWideningDecision(&I, VF, Decision, Cost);
7475         continue;
7476       }
7477 
7478       // Choose between Interleaving, Gather/Scatter or Scalarization.
7479       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7480       unsigned NumAccesses = 1;
7481       if (isAccessInterleaved(&I)) {
7482         auto Group = getInterleavedAccessGroup(&I);
7483         assert(Group && "Fail to get an interleaved access group.");
7484 
7485         // Make one decision for the whole group.
7486         if (getWideningDecision(&I, VF) != CM_Unknown)
7487           continue;
7488 
7489         NumAccesses = Group->getNumMembers();
7490         if (interleavedAccessCanBeWidened(&I, VF))
7491           InterleaveCost = getInterleaveGroupCost(&I, VF);
7492       }
7493 
7494       InstructionCost GatherScatterCost =
7495           isLegalGatherOrScatter(&I)
7496               ? getGatherScatterCost(&I, VF) * NumAccesses
7497               : InstructionCost::getInvalid();
7498 
7499       InstructionCost ScalarizationCost =
7500           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7501 
7502       // Choose better solution for the current VF,
7503       // write down this decision and use it during vectorization.
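      // Note the tie-breaking below: interleaving wins a tie with
      // gather/scatter, while scalarization wins a tie with either of the
      // other two.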
7504       InstructionCost Cost;
7505       InstWidening Decision;
7506       if (InterleaveCost <= GatherScatterCost &&
7507           InterleaveCost < ScalarizationCost) {
7508         Decision = CM_Interleave;
7509         Cost = InterleaveCost;
7510       } else if (GatherScatterCost < ScalarizationCost) {
7511         Decision = CM_GatherScatter;
7512         Cost = GatherScatterCost;
7513       } else {
7514         Decision = CM_Scalarize;
7515         Cost = ScalarizationCost;
7516       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the whole group,
      // but it will actually be assigned to a single instruction.
7520       if (auto Group = getInterleavedAccessGroup(&I))
7521         setWideningDecision(Group, VF, Decision, Cost);
7522       else
7523         setWideningDecision(&I, VF, Decision, Cost);
7524     }
7525   }
7526 
  // Make sure that any load of an address and any other address computation
7528   // remains scalar unless there is gather/scatter support. This avoids
7529   // inevitable extracts into address registers, and also has the benefit of
7530   // activating LSR more, since that pass can't optimize vectorized
7531   // addresses.
7532   if (TTI.prefersVectorizedAddressing())
7533     return;
7534 
7535   // Start with all scalar pointer uses.
7536   SmallPtrSet<Instruction *, 8> AddrDefs;
7537   for (BasicBlock *BB : TheLoop->blocks())
7538     for (Instruction &I : *BB) {
7539       Instruction *PtrDef =
7540         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7541       if (PtrDef && TheLoop->contains(PtrDef) &&
7542           getWideningDecision(&I, VF) != CM_GatherScatter)
7543         AddrDefs.insert(PtrDef);
7544     }
7545 
7546   // Add all instructions used to generate the addresses.
7547   SmallVector<Instruction *, 4> Worklist;
7548   append_range(Worklist, AddrDefs);
7549   while (!Worklist.empty()) {
7550     Instruction *I = Worklist.pop_back_val();
7551     for (auto &Op : I->operands())
7552       if (auto *InstOp = dyn_cast<Instruction>(Op))
7553         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7554             AddrDefs.insert(InstOp).second)
7555           Worklist.push_back(InstOp);
7556   }
7557 
7558   for (auto *I : AddrDefs) {
7559     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since that would involve finding out whether
      // the loaded register is involved in an address computation, the
      // decision is instead changed here, where we know this is the case.
7564       InstWidening Decision = getWideningDecision(I, VF);
7565       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7566         // Scalarize a widened load of address.
7567         setWideningDecision(
7568             I, VF, CM_Scalarize,
7569             (VF.getKnownMinValue() *
7570              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7571       else if (auto Group = getInterleavedAccessGroup(I)) {
7572         // Scalarize an interleave group of address loads.
7573         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7574           if (Instruction *Member = Group->getMember(I))
7575             setWideningDecision(
7576                 Member, VF, CM_Scalarize,
7577                 (VF.getKnownMinValue() *
7578                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7579         }
7580       }
7581     } else
      // Make sure I gets scalarized and gets a cost estimate without
      // scalarization overhead.
7584       ForcedScalars[VF].insert(I);
7585   }
7586 }
7587 
7588 InstructionCost
7589 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7590                                                Type *&VectorTy) {
7591   Type *RetTy = I->getType();
7592   if (canTruncateToMinimalBitwidth(I, VF))
7593     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7594   auto SE = PSE.getSE();
7595   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7596 
7597   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7598                                                 ElementCount VF) -> bool {
7599     if (VF.isScalar())
7600       return true;
7601 
7602     auto Scalarized = InstsToScalarize.find(VF);
7603     assert(Scalarized != InstsToScalarize.end() &&
7604            "VF not yet analyzed for scalarization profitability");
7605     return !Scalarized->second.count(I) &&
7606            llvm::all_of(I->users(), [&](User *U) {
7607              auto *UI = cast<Instruction>(U);
7608              return !Scalarized->second.count(UI);
7609            });
7610   };
7611   (void) hasSingleCopyAfterVectorization;
7612 
7613   if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // we don't have to multiply the instruction cost by VF.
7619     assert(I->getOpcode() == Instruction::GetElementPtr ||
7620            I->getOpcode() == Instruction::PHI ||
7621            (I->getOpcode() == Instruction::BitCast &&
7622             I->getType()->isPointerTy()) ||
7623            hasSingleCopyAfterVectorization(I, VF));
7624     VectorTy = RetTy;
7625   } else
7626     VectorTy = ToVectorTy(RetTy, VF);
7627 
7628   // TODO: We need to estimate the cost of intrinsic calls.
7629   switch (I->getOpcode()) {
7630   case Instruction::GetElementPtr:
7631     // We mark this instruction as zero-cost because the cost of GEPs in
7632     // vectorized code depends on whether the corresponding memory instruction
7633     // is scalarized or not. Therefore, we handle GEPs with the memory
7634     // instruction cost.
7635     return 0;
7636   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7640     bool ScalarPredicatedBB = false;
7641     BranchInst *BI = cast<BranchInst>(I);
7642     if (VF.isVector() && BI->isConditional() &&
7643         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7644          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7645       ScalarPredicatedBB = true;
7646 
7647     if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
7649       if (VF.isScalable())
7650         return InstructionCost::getInvalid();
7651       // Return cost for branches around scalarized and predicated blocks.
7652       auto *Vec_i1Ty =
7653           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7654       return (
7655           TTI.getScalarizationOverhead(
7656               Vec_i1Ty, APInt::getAllOnesValue(VF.getFixedValue()), false,
7657               true) +
7658           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7659     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7660       // The back-edge branch will remain, as will all scalar branches.
7661       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7662     else
7663       // This branch will be eliminated by if-conversion.
7664       return 0;
7665     // Note: We currently assume zero cost for an unconditional branch inside
7666     // a predicated block since it will become a fall-through, although we
7667     // may decide in the future to call TTI for all branches.
7668   }
7669   case Instruction::PHI: {
7670     auto *Phi = cast<PHINode>(I);
7671 
7672     // First-order recurrences are replaced by vector shuffles inside the loop.
7673     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7674     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7675       return TTI.getShuffleCost(
7676           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7677           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7678 
7679     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7680     // converted into select instructions. We require N - 1 selects per phi
7681     // node, where N is the number of incoming values.
7682     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7683       return (Phi->getNumIncomingValues() - 1) *
7684              TTI.getCmpSelInstrCost(
7685                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7686                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7687                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7688 
7689     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7690   }
7691   case Instruction::UDiv:
7692   case Instruction::SDiv:
7693   case Instruction::URem:
7694   case Instruction::SRem:
7695     // If we have a predicated instruction, it may not be executed for each
7696     // vector lane. Get the scalarization cost and scale this amount by the
7697     // probability of executing the predicated block. If the instruction is not
7698     // predicated, we fall through to the next case.
7699     if (VF.isVector() && isScalarWithPredication(I)) {
7700       InstructionCost Cost = 0;
7701 
7702       // These instructions have a non-void type, so account for the phi nodes
7703       // that we will create. This cost is likely to be zero. The phi node
7704       // cost, if any, should be scaled by the block probability because it
7705       // models a copy at the end of each predicated block.
7706       Cost += VF.getKnownMinValue() *
7707               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7708 
7709       // The cost of the non-predicated instruction.
7710       Cost += VF.getKnownMinValue() *
7711               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7712 
7713       // The cost of insertelement and extractelement instructions needed for
7714       // scalarization.
7715       Cost += getScalarizationOverhead(I, VF);
7716 
7717       // Scale the cost by the probability of executing the predicated blocks.
7718       // This assumes the predicated block for each vector lane is equally
7719       // likely.
7720       return Cost / getReciprocalPredBlockProb();
7721     }
7722     LLVM_FALLTHROUGH;
7723   case Instruction::Add:
7724   case Instruction::FAdd:
7725   case Instruction::Sub:
7726   case Instruction::FSub:
7727   case Instruction::Mul:
7728   case Instruction::FMul:
7729   case Instruction::FDiv:
7730   case Instruction::FRem:
7731   case Instruction::Shl:
7732   case Instruction::LShr:
7733   case Instruction::AShr:
7734   case Instruction::And:
7735   case Instruction::Or:
7736   case Instruction::Xor: {
7737     // Since we will replace the stride by 1 the multiplication should go away.
7738     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7739       return 0;
7740 
7741     // Detect reduction patterns
7742     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7743       return *RedCost;
7744 
7745     // Certain instructions can be cheaper to vectorize if they have a constant
7746     // second vector operand. One example of this are shifts on x86.
7747     Value *Op2 = I->getOperand(1);
7748     TargetTransformInfo::OperandValueProperties Op2VP;
7749     TargetTransformInfo::OperandValueKind Op2VK =
7750         TTI.getOperandInfo(Op2, Op2VP);
7751     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7752       Op2VK = TargetTransformInfo::OK_UniformValue;
7753 
7754     SmallVector<const Value *, 4> Operands(I->operand_values());
7755     return TTI.getArithmeticInstrCost(
7756         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7757         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7758   }
7759   case Instruction::FNeg: {
7760     return TTI.getArithmeticInstrCost(
7761         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7762         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7763         TargetTransformInfo::OP_None, I->getOperand(0), I);
7764   }
7765   case Instruction::Select: {
7766     SelectInst *SI = cast<SelectInst>(I);
7767     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7768     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7769 
7770     const Value *Op0, *Op1;
7771     using namespace llvm::PatternMatch;
7772     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7773                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7774       // select x, y, false --> x & y
7775       // select x, true, y --> x | y
7776       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7777       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7778       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7779       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7780       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7781               Op1->getType()->getScalarSizeInBits() == 1);
7782 
7783       SmallVector<const Value *, 2> Operands{Op0, Op1};
7784       return TTI.getArithmeticInstrCost(
7785           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7786           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7787     }
7788 
7789     Type *CondTy = SI->getCondition()->getType();
7790     if (!ScalarCond)
7791       CondTy = VectorType::get(CondTy, VF);
7792     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7793                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7794   }
7795   case Instruction::ICmp:
7796   case Instruction::FCmp: {
7797     Type *ValTy = I->getOperand(0)->getType();
7798     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7799     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7800       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7801     VectorTy = ToVectorTy(ValTy, VF);
7802     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7803                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7804   }
7805   case Instruction::Store:
7806   case Instruction::Load: {
7807     ElementCount Width = VF;
7808     if (Width.isVector()) {
7809       InstWidening Decision = getWideningDecision(I, Width);
7810       assert(Decision != CM_Unknown &&
7811              "CM decision should be taken at this point");
7812       if (Decision == CM_Scalarize)
7813         Width = ElementCount::getFixed(1);
7814     }
7815     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7816     return getMemoryInstructionCost(I, VF);
7817   }
7818   case Instruction::BitCast:
7819     if (I->getType()->isPointerTy())
7820       return 0;
7821     LLVM_FALLTHROUGH;
7822   case Instruction::ZExt:
7823   case Instruction::SExt:
7824   case Instruction::FPToUI:
7825   case Instruction::FPToSI:
7826   case Instruction::FPExt:
7827   case Instruction::PtrToInt:
7828   case Instruction::IntToPtr:
7829   case Instruction::SIToFP:
7830   case Instruction::UIToFP:
7831   case Instruction::Trunc:
7832   case Instruction::FPTrunc: {
7833     // Computes the CastContextHint from a Load/Store instruction.
7834     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7835       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7836              "Expected a load or a store!");
7837 
7838       if (VF.isScalar() || !TheLoop->contains(I))
7839         return TTI::CastContextHint::Normal;
7840 
7841       switch (getWideningDecision(I, VF)) {
7842       case LoopVectorizationCostModel::CM_GatherScatter:
7843         return TTI::CastContextHint::GatherScatter;
7844       case LoopVectorizationCostModel::CM_Interleave:
7845         return TTI::CastContextHint::Interleave;
7846       case LoopVectorizationCostModel::CM_Scalarize:
7847       case LoopVectorizationCostModel::CM_Widen:
7848         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7849                                         : TTI::CastContextHint::Normal;
7850       case LoopVectorizationCostModel::CM_Widen_Reverse:
7851         return TTI::CastContextHint::Reversed;
7852       case LoopVectorizationCostModel::CM_Unknown:
7853         llvm_unreachable("Instr did not go through cost modelling?");
7854       }
7855 
7856       llvm_unreachable("Unhandled case!");
7857     };
7858 
7859     unsigned Opcode = I->getOpcode();
7860     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7861     // For Trunc, the context is the only user, which must be a StoreInst.
7862     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7863       if (I->hasOneUse())
7864         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7865           CCH = ComputeCCH(Store);
7866     }
7867     // For Z/Sext, the context is the operand, which must be a LoadInst.
7868     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7869              Opcode == Instruction::FPExt) {
7870       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7871         CCH = ComputeCCH(Load);
7872     }
7873 
7874     // We optimize the truncation of induction variables having constant
7875     // integer steps. The cost of these truncations is the same as the scalar
7876     // operation.
7877     if (isOptimizableIVTruncate(I, VF)) {
7878       auto *Trunc = cast<TruncInst>(I);
7879       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7880                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7881     }
7882 
7883     // Detect reduction patterns
7884     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7885       return *RedCost;
7886 
7887     Type *SrcScalarTy = I->getOperand(0)->getType();
7888     Type *SrcVecTy =
7889         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7890     if (canTruncateToMinimalBitwidth(I, VF)) {
7891       // This cast is going to be shrunk. This may remove the cast or it might
7892       // turn it into slightly different cast. For example, if MinBW == 16,
7893       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7894       //
7895       // Calculate the modified src and dest types.
7896       Type *MinVecTy = VectorTy;
7897       if (Opcode == Instruction::Trunc) {
7898         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7899         VectorTy =
7900             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7901       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7902         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7903         VectorTy =
7904             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7905       }
7906     }
7907 
7908     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7909   }
7910   case Instruction::Call: {
7911     bool NeedToScalarize;
7912     CallInst *CI = cast<CallInst>(I);
7913     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7914     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7915       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7916       return std::min(CallCost, IntrinsicCost);
7917     }
7918     return CallCost;
7919   }
7920   case Instruction::ExtractValue:
7921     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7922   case Instruction::Alloca:
7923     // We cannot easily widen alloca to a scalable alloca, as
7924     // the result would need to be a vector of pointers.
7925     if (VF.isScalable())
7926       return InstructionCost::getInvalid();
7927     LLVM_FALLTHROUGH;
7928   default:
7929     // This opcode is unknown. Assume that it is the same as 'mul'.
7930     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7931   } // end of switch.
7932 }
7933 
7934 char LoopVectorize::ID = 0;
7935 
7936 static const char lv_name[] = "Loop Vectorization";
7937 
7938 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7939 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7940 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7941 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7942 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7943 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7944 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7945 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7946 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7947 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7948 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7949 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7950 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7951 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7952 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7953 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7954 
7955 namespace llvm {
7956 
7957 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7958 
7959 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7960                               bool VectorizeOnlyWhenForced) {
7961   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7962 }
7963 
7964 } // end namespace llvm
7965 
7966 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7967   // Check if the pointer operand of a load or store instruction is
7968   // consecutive.
7969   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7970     return Legal->isConsecutivePtr(Ptr);
7971   return false;
7972 }
7973 
7974 void LoopVectorizationCostModel::collectValuesToIgnore() {
7975   // Ignore ephemeral values.
7976   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7977 
7978   // Ignore type-promoting instructions we identified during reduction
7979   // detection.
7980   for (auto &Reduction : Legal->getReductionVars()) {
7981     RecurrenceDescriptor &RedDes = Reduction.second;
7982     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7983     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7984   }
7985   // Ignore type-casting instructions we identified during induction
7986   // detection.
7987   for (auto &Induction : Legal->getInductionVars()) {
7988     InductionDescriptor &IndDes = Induction.second;
7989     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7990     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7991   }
7992 }
7993 
7994 void LoopVectorizationCostModel::collectInLoopReductions() {
7995   for (auto &Reduction : Legal->getReductionVars()) {
7996     PHINode *Phi = Reduction.first;
7997     RecurrenceDescriptor &RdxDesc = Reduction.second;
7998 
7999     // We don't collect reductions that are type promoted (yet).
8000     if (RdxDesc.getRecurrenceType() != Phi->getType())
8001       continue;
8002 
8003     // If the target would prefer this reduction to happen "in-loop", then we
8004     // want to record it as such.
8005     unsigned Opcode = RdxDesc.getOpcode();
8006     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
8007         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
8008                                    TargetTransformInfo::ReductionFlags()))
8009       continue;
8010 
8011     // Check that we can correctly put the reductions into the loop, by
8012     // finding the chain of operations that leads from the phi to the loop
8013     // exit value.
8014     SmallVector<Instruction *, 4> ReductionOperations =
8015         RdxDesc.getReductionOpChain(Phi, TheLoop);
8016     bool InLoop = !ReductionOperations.empty();
8017     if (InLoop) {
8018       InLoopReductionChains[Phi] = ReductionOperations;
8019       // Add the elements to InLoopReductionImmediateChains for cost modelling.
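      // For example (illustrative), for a chain phi -> %add1 -> %add2 we
      // record %add1 -> phi and %add2 -> %add1 below.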
8020       Instruction *LastChain = Phi;
8021       for (auto *I : ReductionOperations) {
8022         InLoopReductionImmediateChains[I] = LastChain;
8023         LastChain = I;
8024       }
8025     }
8026     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
8027                       << " reduction for phi: " << *Phi << "\n");
8028   }
8029 }
8030 
8031 // TODO: we could return a pair of values that specify the max VF and
8032 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
8034 // doesn't have a cost model that can choose which plan to execute if
8035 // more than one is generated.
8036 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
8037                                  LoopVectorizationCostModel &CM) {
8038   unsigned WidestType;
8039   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
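  // For example (illustrative), 256-bit wide vector registers and a widest
  // scalar type of 32 bits give a VF of 8.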
8040   return WidestVectorRegBits / WidestType;
8041 }
8042 
8043 VectorizationFactor
8044 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
8045   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
8046   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
8049   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8050   // the vectorization pipeline.
8051   if (!OrigLoop->isInnermost()) {
8052     // If the user doesn't provide a vectorization factor, determine a
8053     // reasonable one.
8054     if (UserVF.isZero()) {
8055       VF = ElementCount::getFixed(determineVPlanVF(
8056           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
8057               .getFixedSize(),
8058           CM));
8059       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
8060 
8061       // Make sure we have a VF > 1 for stress testing.
8062       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
8063         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
8064                           << "overriding computed VF.\n");
8065         VF = ElementCount::getFixed(4);
8066       }
8067     }
8068     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8069     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
8070            "VF needs to be a power of two");
8071     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
8072                       << "VF " << VF << " to build VPlans.\n");
8073     buildVPlans(VF, VF);
8074 
8075     // For VPlan build stress testing, we bail out after VPlan construction.
8076     if (VPlanBuildStressTest)
8077       return VectorizationFactor::Disabled();
8078 
8079     return {VF, 0 /*Cost*/};
8080   }
8081 
8082   LLVM_DEBUG(
8083       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
8084                 "VPlan-native path.\n");
8085   return VectorizationFactor::Disabled();
8086 }
8087 
8088 Optional<VectorizationFactor>
8089 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
8090   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8091   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
8093     return None;
8094 
8095   // Invalidate interleave groups if all blocks of loop will be predicated.
8096   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
8097       !useMaskedInterleavedAccesses(*TTI)) {
8098     LLVM_DEBUG(
8099         dbgs()
8100         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
8101            "which requires masked-interleaved support.\n");
8102     if (CM.InterleaveInfo.invalidateGroups())
8103       // Invalidating interleave groups also requires invalidating all decisions
8104       // based on them, which includes widening decisions and uniform and scalar
8105       // values.
8106       CM.invalidateCostModelingDecisions();
8107   }
8108 
8109   ElementCount MaxUserVF =
8110       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
8111   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
8112   if (!UserVF.isZero() && UserVFIsLegal) {
8113     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
8114            "VF needs to be a power of two");
8115     // Collect the instructions (and their associated costs) that will be more
8116     // profitable to scalarize.
8117     if (CM.selectUserVectorizationFactor(UserVF)) {
8118       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
8119       CM.collectInLoopReductions();
8120       buildVPlansWithVPRecipes(UserVF, UserVF);
8121       LLVM_DEBUG(printPlans(dbgs()));
8122       return {{UserVF, 0}};
8123     } else
8124       reportVectorizationInfo("UserVF ignored because of invalid costs.",
8125                               "InvalidCost", ORE, OrigLoop);
8126   }
8127 
8128   // Populate the set of Vectorization Factor Candidates.
8129   ElementCountSet VFCandidates;
8130   for (auto VF = ElementCount::getFixed(1);
8131        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
8132     VFCandidates.insert(VF);
8133   for (auto VF = ElementCount::getScalable(1);
8134        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
8135     VFCandidates.insert(VF);
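  // For example (illustrative), with a maximum fixed VF of 16 the fixed-width
  // candidates are VF = 1, 2, 4, 8 and 16, plus the analogous scalable
  // candidates when a maximum scalable VF is available.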
8136 
8137   for (const auto &VF : VFCandidates) {
8138     // Collect Uniform and Scalar instructions after vectorization with VF.
8139     CM.collectUniformsAndScalars(VF);
8140 
8141     // Collect the instructions (and their associated costs) that will be more
8142     // profitable to scalarize.
8143     if (VF.isVector())
8144       CM.collectInstsToScalarize(VF);
8145   }
8146 
8147   CM.collectInLoopReductions();
8148   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
8149   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
8150 
8151   LLVM_DEBUG(printPlans(dbgs()));
8152   if (!MaxFactors.hasVector())
8153     return VectorizationFactor::Disabled();
8154 
8155   // Select the optimal vectorization factor.
8156   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
8157 
8158   // Check if it is profitable to vectorize with runtime checks.
8159   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
8160   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
8161     bool PragmaThresholdReached =
8162         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
8163     bool ThresholdReached =
8164         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
8165     if ((ThresholdReached && !Hints.allowReordering()) ||
8166         PragmaThresholdReached) {
8167       ORE->emit([&]() {
8168         return OptimizationRemarkAnalysisAliasing(
8169                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
8170                    OrigLoop->getHeader())
8171                << "loop not vectorized: cannot prove it is safe to reorder "
8172                   "memory operations";
8173       });
8174       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8175       Hints.emitRemarkWithHints();
8176       return VectorizationFactor::Disabled();
8177     }
8178   }
8179   return SelectedVF;
8180 }
8181 
8182 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8183   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8184                     << '\n');
8185   BestVF = VF;
8186   BestUF = UF;
8187 
8188   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8189     return !Plan->hasVF(VF);
8190   });
8191   assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
8192 }
8193 
8194 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8195                                            DominatorTree *DT) {
8196   // Perform the actual loop transformation.
8197 
8198   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8199   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8200   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8201 
8202   VPTransformState State{
8203       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8204   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8205   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8206   State.CanonicalIV = ILV.Induction;
8207 
8208   ILV.printDebugTracesAtStart();
8209 
8210   //===------------------------------------------------===//
8211   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
8215   //
8216   //===------------------------------------------------===//
8217 
8218   // 2. Copy and widen instructions from the old loop into the new loop.
8219   VPlans.front()->execute(&State);
8220 
8221   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8222   //    predication, updating analyses.
8223   ILV.fixVectorizedLoop(State);
8224 
8225   ILV.printDebugTracesAtEnd();
8226 }
8227 
8228 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8229 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8230   for (const auto &Plan : VPlans)
8231     if (PrintVPlansInDotFormat)
8232       Plan->printDOT(O);
8233     else
8234       Plan->print(O);
8235 }
8236 #endif
8237 
8238 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8239     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8240 
  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
8244   SmallVector<BasicBlock*> ExitingBlocks;
8245   OrigLoop->getExitingBlocks(ExitingBlocks);
8246   for (auto *BB : ExitingBlocks) {
8247     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8248     if (!Cmp || !Cmp->hasOneUse())
8249       continue;
8250 
8251     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8252     if (!DeadInstructions.insert(Cmp).second)
8253       continue;
8254 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8256     // TODO: can recurse through operands in general
8257     for (Value *Op : Cmp->operands()) {
8258       if (isa<TruncInst>(Op) && Op->hasOneUse())
8259           DeadInstructions.insert(cast<Instruction>(Op));
8260     }
8261   }
8262 
8263   // We create new "steps" for induction variable updates to which the original
8264   // induction variables map. An original update instruction will be dead if
8265   // all its users except the induction variable are dead.
8266   auto *Latch = OrigLoop->getLoopLatch();
8267   for (auto &Induction : Legal->getInductionVars()) {
8268     PHINode *Ind = Induction.first;
8269     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8270 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8273     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8274       continue;
8275 
8276     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8277           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8278         }))
8279       DeadInstructions.insert(IndUpdate);
8280 
    // We also record as "Dead" the type-casting instructions we had identified
8282     // during induction analysis. We don't need any handling for them in the
8283     // vectorized loop because we have proven that, under a proper runtime
8284     // test guarding the vectorized loop, the value of the phi, and the casted
8285     // value of the phi, are the same. The last instruction in this casting chain
8286     // will get its scalar/vector/widened def from the scalar/vector/widened def
8287     // of the respective phi node. Any other casts in the induction def-use chain
8288     // have no other uses outside the phi update chain, and will be ignored.
8289     InductionDescriptor &IndDes = Induction.second;
8290     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8291     DeadInstructions.insert(Casts.begin(), Casts.end());
8292   }
8293 }
8294 
8295 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8296 
8297 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8298 
8299 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8300                                         Instruction::BinaryOps BinOp) {
8301   // When unrolling and the VF is 1, we only need to add a simple scalar.
8302   Type *Ty = Val->getType();
8303   assert(!Ty->isVectorTy() && "Val must be a scalar");
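  // For example (illustrative), for StartIdx == 2 this returns
  // Val + 2 * Step (or the FP equivalent using BinOp below).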
8304 
8305   if (Ty->isFloatingPointTy()) {
8306     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8307 
8308     // Floating-point operations inherit FMF via the builder's flags.
8309     Value *MulOp = Builder.CreateFMul(C, Step);
8310     return Builder.CreateBinOp(BinOp, Val, MulOp);
8311   }
8312   Constant *C = ConstantInt::get(Ty, StartIdx);
8313   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8314 }
8315 
8316 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
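  // The resulting loop metadata has roughly the following shape (illustrative):
  //   br i1 %cond, label %header, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, ..., !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}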
8317   SmallVector<Metadata *, 4> MDs;
8318   // Reserve first location for self reference to the LoopID metadata node.
8319   MDs.push_back(nullptr);
8320   bool IsUnrollMetadata = false;
8321   MDNode *LoopID = L->getLoopID();
8322   if (LoopID) {
8323     // First find existing loop unrolling disable metadata.
8324     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8325       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8326       if (MD) {
8327         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8328         IsUnrollMetadata =
8329             S && S->getString().startswith("llvm.loop.unroll.disable");
8330       }
8331       MDs.push_back(LoopID->getOperand(i));
8332     }
8333   }
8334 
8335   if (!IsUnrollMetadata) {
8336     // Add runtime unroll disable metadata.
8337     LLVMContext &Context = L->getHeader()->getContext();
8338     SmallVector<Metadata *, 1> DisableOperands;
8339     DisableOperands.push_back(
8340         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8341     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8342     MDs.push_back(DisableNode);
8343     MDNode *NewLoopID = MDNode::get(Context, MDs);
8344     // Set operand 0 to refer to the loop id itself.
8345     NewLoopID->replaceOperandWith(0, NewLoopID);
8346     L->setLoopID(NewLoopID);
8347   }
8348 }
8349 
8350 //===--------------------------------------------------------------------===//
8351 // EpilogueVectorizerMainLoop
8352 //===--------------------------------------------------------------------===//
8353 
8354 /// This function is partially responsible for generating the control flow
8355 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8356 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8357   MDNode *OrigLoopID = OrigLoop->getLoopID();
8358   Loop *Lp = createVectorLoopSkeleton("");
8359 
8360   // Generate the code to check the minimum iteration count of the vector
8361   // epilogue (see below).
8362   EPI.EpilogueIterationCountCheck =
8363       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8364   EPI.EpilogueIterationCountCheck->setName("iter.check");
8365 
8366   // Generate the code to check any assumptions that we've made for SCEV
8367   // expressions.
8368   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8369 
8370   // Generate the code that checks at runtime if arrays overlap. We put the
8371   // checks into a separate block to make the more common case of few elements
8372   // faster.
8373   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8374 
8375   // Generate the iteration count check for the main loop, *after* the check
8376   // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path length for
  // the main loop is compensated for by the gain from vectorizing the larger
8379   // trip count. Note: the branch will get updated later on when we vectorize
8380   // the epilogue.
8381   EPI.MainLoopIterationCountCheck =
8382       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8383 
8384   // Generate the induction variable.
8385   OldInduction = Legal->getPrimaryInduction();
8386   Type *IdxTy = Legal->getWidestInductionType();
8387   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8388   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8389   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8390   EPI.VectorTripCount = CountRoundDown;
8391   Induction =
8392       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8393                               getDebugLocFromInstOrOperands(OldInduction));
8394 
  // Skip creating induction resume values here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from the
  // original loop.
8399 
8400   return completeLoopSkeleton(Lp, OrigLoopID);
8401 }
8402 
8403 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8404   LLVM_DEBUG({
8405     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8406            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8407            << ", Main Loop UF:" << EPI.MainLoopUF
8408            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8409            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8410   });
8411 }
8412 
8413 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8414   DEBUG_WITH_TYPE(VerboseDebug, {
8415     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8416   });
8417 }
8418 
8419 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8420     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8421   assert(L && "Expected valid Loop.");
8422   assert(Bypass && "Expected valid bypass basic block.");
8423   unsigned VFactor =
8424       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8425   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8426   Value *Count = getOrCreateTripCount(L);
8427   // Reuse existing vector loop preheader for TC checks.
8428   // Note that new preheader block is generated for vector loop.
8429   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8430   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8431 
8432   // Generate code to check if the loop's trip count is less than VF * UF of
8433   // the main vector loop (or of the epilogue loop when ForEpilogue is true).
8434   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8435       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8436 
8437   Value *CheckMinIters = Builder.CreateICmp(
8438       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8439       "min.iters.check");
8440 
8441   if (!ForEpilogue)
8442     TCCheckBlock->setName("vector.main.loop.iter.check");
8443 
8444   // Create new preheader for vector loop.
8445   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8446                                    DT, LI, nullptr, "vector.ph");
8447 
8448   if (ForEpilogue) {
8449     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8450                                  DT->getNode(Bypass)->getIDom()) &&
8451            "TC check is expected to dominate Bypass");
8452 
8453     // Update dominator for Bypass & LoopExit.
8454     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8455     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8456       // If a scalar epilogue is required (e.g. for loops with multiple exits),
8457       // there is no edge from the middle block to the exit blocks (as the
8458       // epilogue must run) and thus no need to update their immediate dominator.
8459       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8460 
8461     LoopBypassBlocks.push_back(TCCheckBlock);
8462 
8463     // Save the trip count so we don't have to regenerate it in the
8464     // vec.epilog.iter.check. This is safe to do because the trip count
8465     // generated here dominates the vector epilog iter check.
8466     EPI.TripCount = Count;
8467   }
8468 
8469   ReplaceInstWithInst(
8470       TCCheckBlock->getTerminator(),
8471       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8472 
8473   return TCCheckBlock;
8474 }
8475 
8476 //===--------------------------------------------------------------------===//
8477 // EpilogueVectorizerEpilogueLoop
8478 //===--------------------------------------------------------------------===//
8479 
8480 /// This function is partially responsible for generating the control flow
8481 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
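/// Roughly, this second pass creates the vec.epilog.iter.check and
/// vec.epilog.ph blocks, redirects the bypass branch of the main-loop
/// iteration count check to the epilogue preheader, and introduces a resume
/// phi that starts the epilogue induction either at 0 (main vector loop
/// skipped) or at the main vector trip count.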
8482 BasicBlock *
8483 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8484   MDNode *OrigLoopID = OrigLoop->getLoopID();
8485   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8486 
8487   // Now, compare the remaining count; if there aren't enough iterations to
8488   // execute the vectorized epilogue, skip to the scalar part.
8489   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8490   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8491   LoopVectorPreHeader =
8492       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8493                  LI, nullptr, "vec.epilog.ph");
8494   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8495                                           VecEpilogueIterationCountCheck);
8496 
8497   // Adjust the control flow taking the state info from the main loop
8498   // vectorization into account.
8499   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8500          "expected this to be saved from the previous pass.");
8501   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8502       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8503 
8504   DT->changeImmediateDominator(LoopVectorPreHeader,
8505                                EPI.MainLoopIterationCountCheck);
8506 
8507   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8508       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8509 
8510   if (EPI.SCEVSafetyCheck)
8511     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8512         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8513   if (EPI.MemSafetyCheck)
8514     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8515         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8516 
8517   DT->changeImmediateDominator(
8518       VecEpilogueIterationCountCheck,
8519       VecEpilogueIterationCountCheck->getSinglePredecessor());
8520 
8521   DT->changeImmediateDominator(LoopScalarPreHeader,
8522                                EPI.EpilogueIterationCountCheck);
8523   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8524     // If there is a scalar epilogue which must run, there is no edge from the
8525     // middle block to the exit blocks and thus no need to update the immediate
8526     // dominator of the exit blocks.
8527     DT->changeImmediateDominator(LoopExitBlock,
8528                                  EPI.EpilogueIterationCountCheck);
8529 
8530   // Keep track of bypass blocks, as they feed start values to the induction
8531   // phis in the scalar loop preheader.
8532   if (EPI.SCEVSafetyCheck)
8533     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8534   if (EPI.MemSafetyCheck)
8535     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8536   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8537 
8538   // Generate a resume induction for the vector epilogue and put it in the
8539   // vector epilogue preheader.
8540   Type *IdxTy = Legal->getWidestInductionType();
8541   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8542                                          LoopVectorPreHeader->getFirstNonPHI());
8543   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8544   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8545                            EPI.MainLoopIterationCountCheck);
8546 
8547   // Generate the induction variable.
8548   OldInduction = Legal->getPrimaryInduction();
8549   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8550   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8551   Value *StartIdx = EPResumeVal;
8552   Induction =
8553       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8554                               getDebugLocFromInstOrOperands(OldInduction));
8555 
8556   // Generate induction resume values. These variables save the new starting
8557   // indexes for the scalar loop. They are used to test if there are any tail
8558   // iterations left once the vector loop has completed.
8559   // Note that when the vectorized epilogue is skipped due to iteration count
8560   // check, then the resume value for the induction variable comes from
8561   // the trip count of the main vector loop, hence passing the AdditionalBypass
8562   // argument.
8563   createInductionResumeValues(Lp, CountRoundDown,
8564                               {VecEpilogueIterationCountCheck,
8565                                EPI.VectorTripCount} /* AdditionalBypass */);
8566 
8567   AddRuntimeUnrollDisableMetaData(Lp);
8568   return completeLoopSkeleton(Lp, OrigLoopID);
8569 }
8570 
8571 BasicBlock *
8572 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8573     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8574 
8575   assert(EPI.TripCount &&
8576          "Expected trip count to have been saved in the first pass.");
8577   assert(
8578       (!isa<Instruction>(EPI.TripCount) ||
8579        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8580       "saved trip count does not dominate insertion point.");
8581   Value *TC = EPI.TripCount;
8582   IRBuilder<> Builder(Insert->getTerminator());
8583   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8584 
8585   // Generate code to check if the remaining iteration count (after the main
8586   // vector loop) is less than VF * UF of the vector epilogue loop.
8587   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8588       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8589 
8590   Value *CheckMinIters = Builder.CreateICmp(
8591       P, Count,
8592       ConstantInt::get(Count->getType(),
8593                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8594       "min.epilog.iters.check");
8595 
8596   ReplaceInstWithInst(
8597       Insert->getTerminator(),
8598       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8599 
8600   LoopBypassBlocks.push_back(Insert);
8601   return Insert;
8602 }
8603 
8604 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8605   LLVM_DEBUG({
8606     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8607            << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8608            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8609   });
8610 }
8611 
8612 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8613   DEBUG_WITH_TYPE(VerboseDebug, {
8614     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8615   });
8616 }
8617 
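// Evaluate Predicate on each power-of-two VF in [Range.Start, Range.End) and
// return its value at Range.Start; if the predicate changes value at some
// larger VF, clamp Range.End down to that VF. For example (hypothetical), for
// Range = {4, 32) with a predicate that holds for VF = 4 and VF = 8 but not
// for VF = 16, Range becomes {4, 16) and true is returned.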
8618 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8619     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8620   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8621   bool PredicateAtRangeStart = Predicate(Range.Start);
8622 
8623   for (ElementCount TmpVF = Range.Start * 2;
8624        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8625     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8626       Range.End = TmpVF;
8627       break;
8628     }
8629 
8630   return PredicateAtRangeStart;
8631 }
8632 
8633 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8634 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8635 /// of VF's starting at a given VF and extending it as much as possible. Each
8636 /// vectorization decision can potentially shorten this sub-range during
8637 /// buildVPlan().
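/// For example (hypothetical), with MinVF = 4 and MaxVF = 16, the first VPlan
/// is built for the sub-range starting at VF = 4; if some decision differs at
/// VF = 16, that sub-range is clamped to end before 16 and a second VPlan is
/// built for the sub-range starting at VF = 16.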
8638 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8639                                            ElementCount MaxVF) {
8640   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8641   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8642     VFRange SubRange = {VF, MaxVFPlusOne};
8643     VPlans.push_back(buildVPlan(SubRange));
8644     VF = SubRange.End;
8645   }
8646 }
8647 
8648 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8649                                          VPlanPtr &Plan) {
8650   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8651 
8652   // Look for cached value.
8653   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8654   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8655   if (ECEntryIt != EdgeMaskCache.end())
8656     return ECEntryIt->second;
8657 
8658   VPValue *SrcMask = createBlockInMask(Src, Plan);
8659 
8660   // The terminator has to be a branch inst!
8661   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8662   assert(BI && "Unexpected terminator found");
8663 
8664   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8665     return EdgeMaskCache[Edge] = SrcMask;
8666 
8667   // If source is an exiting block, we know the exit edge is dynamically dead
8668   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8669   // adding uses of an otherwise potentially dead instruction.
8670   if (OrigLoop->isLoopExiting(Src))
8671     return EdgeMaskCache[Edge] = SrcMask;
8672 
8673   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8674   assert(EdgeMask && "No Edge Mask found for condition");
8675 
8676   if (BI->getSuccessor(0) != Dst)
8677     EdgeMask = Builder.createNot(EdgeMask);
8678 
8679   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8680     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8681     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8682     // The select version does not introduce new UB if SrcMask is false and
8683     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
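    // For example, 'and i1 false, poison' is poison, whereas
    // 'select i1 false, i1 poison, i1 false' is false.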
8684     VPValue *False = Plan->getOrAddVPValue(
8685         ConstantInt::getFalse(BI->getCondition()->getType()));
8686     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8687   }
8688 
8689   return EdgeMaskCache[Edge] = EdgeMask;
8690 }
8691 
8692 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8693   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8694 
8695   // Look for cached value.
8696   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8697   if (BCEntryIt != BlockMaskCache.end())
8698     return BCEntryIt->second;
8699 
8700   // All-one mask is modelled as no-mask following the convention for masked
8701   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8702   VPValue *BlockMask = nullptr;
8703 
8704   if (OrigLoop->getHeader() == BB) {
8705     if (!CM.blockNeedsPredication(BB))
8706       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8707 
8708     // Create the block in mask as the first non-phi instruction in the block.
8709     VPBuilder::InsertPointGuard Guard(Builder);
8710     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8711     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8712 
8713     // Introduce the early-exit compare IV <= BTC to form header block mask.
8714     // This is used instead of IV < TC because TC may wrap, unlike BTC.
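    // For example, with a trip count of 7 (BTC = 6) and VF = 4, the second
    // vector iteration compares lanes <4, 5, 6, 7> against 6, yielding the
    // mask <1, 1, 1, 0>.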
8715     // Start by constructing the desired canonical IV.
8716     VPValue *IV = nullptr;
8717     if (Legal->getPrimaryInduction())
8718       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8719     else {
8720       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8721       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8722       IV = IVRecipe->getVPSingleValue();
8723     }
8724     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8725     bool TailFolded = !CM.isScalarEpilogueAllowed();
8726 
8727     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8728       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8729       // as a second argument, we only pass the IV here and extract the
8730       // tripcount from the transform state where codegen of the VP instructions
8731       // happens.
8732       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8733     } else {
8734       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8735     }
8736     return BlockMaskCache[BB] = BlockMask;
8737   }
8738 
8739   // This is the block mask. We OR all incoming edges.
8740   for (auto *Predecessor : predecessors(BB)) {
8741     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8742     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8743       return BlockMaskCache[BB] = EdgeMask;
8744 
8745     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8746       BlockMask = EdgeMask;
8747       continue;
8748     }
8749 
8750     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8751   }
8752 
8753   return BlockMaskCache[BB] = BlockMask;
8754 }
8755 
8756 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8757                                                 ArrayRef<VPValue *> Operands,
8758                                                 VFRange &Range,
8759                                                 VPlanPtr &Plan) {
8760   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8761          "Must be called with either a load or store");
8762 
8763   auto willWiden = [&](ElementCount VF) -> bool {
8764     if (VF.isScalar())
8765       return false;
8766     LoopVectorizationCostModel::InstWidening Decision =
8767         CM.getWideningDecision(I, VF);
8768     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8769            "CM decision should be taken at this point.");
8770     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8771       return true;
8772     if (CM.isScalarAfterVectorization(I, VF) ||
8773         CM.isProfitableToScalarize(I, VF))
8774       return false;
8775     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8776   };
8777 
8778   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8779     return nullptr;
8780 
8781   VPValue *Mask = nullptr;
8782   if (Legal->isMaskRequired(I))
8783     Mask = createBlockInMask(I->getParent(), Plan);
8784 
8785   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8786     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8787 
8788   StoreInst *Store = cast<StoreInst>(I);
8789   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8790                                             Mask);
8791 }
8792 
8793 VPWidenIntOrFpInductionRecipe *
8794 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8795                                            ArrayRef<VPValue *> Operands) const {
8796   // Check if this is an integer or fp induction. If so, build the recipe that
8797   // produces its scalar and vector values.
8798   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8799   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8800       II.getKind() == InductionDescriptor::IK_FpInduction) {
8801     assert(II.getStartValue() ==
8802            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8803     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8804     return new VPWidenIntOrFpInductionRecipe(
8805         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8806   }
8807 
8808   return nullptr;
8809 }
8810 
8811 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8812     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8813     VPlan &Plan) const {
8814   // Optimize the special case where the source is a constant integer
8815   // induction variable. Notice that we can only optimize the 'trunc' case
8816   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8817   // (c) other casts depend on pointer size.
8818 
8819   // Determine whether \p K is a truncation based on an induction variable that
8820   // can be optimized.
8821   auto isOptimizableIVTruncate =
8822       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8823     return [=](ElementCount VF) -> bool {
8824       return CM.isOptimizableIVTruncate(K, VF);
8825     };
8826   };
8827 
8828   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8829           isOptimizableIVTruncate(I), Range)) {
8830 
8831     InductionDescriptor II =
8832         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8833     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8834     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8835                                              Start, nullptr, I);
8836   }
8837   return nullptr;
8838 }
8839 
8840 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8841                                                 ArrayRef<VPValue *> Operands,
8842                                                 VPlanPtr &Plan) {
8843   // If all incoming values are equal, the incoming VPValue can be used directly
8844   // instead of creating a new VPBlendRecipe.
8845   VPValue *FirstIncoming = Operands[0];
8846   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8847         return FirstIncoming == Inc;
8848       })) {
8849     return Operands[0];
8850   }
8851 
8852   // We know that all PHIs in non-header blocks are converted into selects, so
8853   // we don't have to worry about the insertion order and we can just use the
8854   // builder. At this point we generate the predication tree. There may be
8855   // duplications since this is a simple recursive scan, but future
8856   // optimizations will clean it up.
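  // For example (hypothetical), a phi
  //   %p = phi i32 [ %a, %if.then ], [ %b, %if.else ]
  // becomes a VPBlendRecipe with operands
  //   {%a, mask(if.then -> bb), %b, mask(if.else -> bb)},
  // which is later lowered to a chain of selects.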
8857   SmallVector<VPValue *, 2> OperandsWithMask;
8858   unsigned NumIncoming = Phi->getNumIncomingValues();
8859 
8860   for (unsigned In = 0; In < NumIncoming; In++) {
8861     VPValue *EdgeMask =
8862       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8863     assert((EdgeMask || NumIncoming == 1) &&
8864            "Multiple predecessors with one having a full mask");
8865     OperandsWithMask.push_back(Operands[In]);
8866     if (EdgeMask)
8867       OperandsWithMask.push_back(EdgeMask);
8868   }
8869   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8870 }
8871 
8872 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8873                                                    ArrayRef<VPValue *> Operands,
8874                                                    VFRange &Range) const {
8875 
8876   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8877       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8878       Range);
8879 
8880   if (IsPredicated)
8881     return nullptr;
8882 
8883   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8884   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8885              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8886              ID == Intrinsic::pseudoprobe ||
8887              ID == Intrinsic::experimental_noalias_scope_decl))
8888     return nullptr;
8889 
8890   auto willWiden = [&](ElementCount VF) -> bool {
8891     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8892     // The following case may be scalarized depending on the VF.
8893     // NeedToScalarize reports whether the vectorized call would have to be
8894     // scalarized; UseVectorIntrinsic reports whether a vector intrinsic is no
8895     // more expensive than the call. Widen if either option applies.
8896     bool NeedToScalarize = false;
8897     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8898     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8899     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8900     return UseVectorIntrinsic || !NeedToScalarize;
8901   };
8902 
8903   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8904     return nullptr;
8905 
8906   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8907   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8908 }
8909 
8910 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8911   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8912          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8913   // Instruction should be widened, unless it is scalar after vectorization,
8914   // scalarization is profitable or it is predicated.
8915   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8916     return CM.isScalarAfterVectorization(I, VF) ||
8917            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8918   };
8919   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8920                                                              Range);
8921 }
8922 
8923 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8924                                            ArrayRef<VPValue *> Operands) const {
8925   auto IsVectorizableOpcode = [](unsigned Opcode) {
8926     switch (Opcode) {
8927     case Instruction::Add:
8928     case Instruction::And:
8929     case Instruction::AShr:
8930     case Instruction::BitCast:
8931     case Instruction::FAdd:
8932     case Instruction::FCmp:
8933     case Instruction::FDiv:
8934     case Instruction::FMul:
8935     case Instruction::FNeg:
8936     case Instruction::FPExt:
8937     case Instruction::FPToSI:
8938     case Instruction::FPToUI:
8939     case Instruction::FPTrunc:
8940     case Instruction::FRem:
8941     case Instruction::FSub:
8942     case Instruction::ICmp:
8943     case Instruction::IntToPtr:
8944     case Instruction::LShr:
8945     case Instruction::Mul:
8946     case Instruction::Or:
8947     case Instruction::PtrToInt:
8948     case Instruction::SDiv:
8949     case Instruction::Select:
8950     case Instruction::SExt:
8951     case Instruction::Shl:
8952     case Instruction::SIToFP:
8953     case Instruction::SRem:
8954     case Instruction::Sub:
8955     case Instruction::Trunc:
8956     case Instruction::UDiv:
8957     case Instruction::UIToFP:
8958     case Instruction::URem:
8959     case Instruction::Xor:
8960     case Instruction::ZExt:
8961       return true;
8962     }
8963     return false;
8964   };
8965 
8966   if (!IsVectorizableOpcode(I->getOpcode()))
8967     return nullptr;
8968 
8969   // Success: widen this instruction.
8970   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8971 }
8972 
8973 void VPRecipeBuilder::fixHeaderPhis() {
8974   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8975   for (VPWidenPHIRecipe *R : PhisToFix) {
8976     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8977     VPRecipeBase *IncR =
8978         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8979     R->addOperand(IncR->getVPSingleValue());
8980   }
8981 }
8982 
8983 VPBasicBlock *VPRecipeBuilder::handleReplication(
8984     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8985     VPlanPtr &Plan) {
8986   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8987       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8988       Range);
8989 
8990   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8991       [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);
8992 
8993   // Even if the instruction is not marked as uniform, there are certain
8994   // intrinsic calls that can be effectively treated as such, so we check for
8995   // them here. Conservatively, we only do this for scalable vectors, since
8996   // for fixed-width VFs we can always fall back on full scalarization.
8997   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8998     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8999     case Intrinsic::assume:
9000     case Intrinsic::lifetime_start:
9001     case Intrinsic::lifetime_end:
9002       // For scalable vectors, if one of the operands is variant, we still
9003       // want to mark the call as uniform, which will generate one instruction
9004       // for just the first lane of the vector. We can't scalarize the call in
9005       // the same way as for fixed-width vectors because we don't know how
9006       // many lanes there are.
9007       //
9008       // The reasons for doing it this way for scalable vectors are:
9009       //   1. For the assume intrinsic, generating the instruction for the
9010       //      first lane is still better than not generating any at all. For
9011       //      example, the input may be a splat across all lanes.
9012       //   2. For the lifetime start/end intrinsics the pointer operand only
9013       //      does anything useful when the input comes from a stack object,
9014       //      which suggests it should always be uniform. For non-stack objects
9015       //      the effect is to poison the object, which still allows us to
9016       //      remove the call.
9017       IsUniform = true;
9018       break;
9019     default:
9020       break;
9021     }
9022   }
9023 
9024   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
9025                                        IsUniform, IsPredicated);
9026   setRecipe(I, Recipe);
9027   Plan->addVPValue(I, Recipe);
9028 
9029   // Find if I uses a predicated instruction. If so, it will use its scalar
9030   // value. Avoid hoisting the insert-element which packs the scalar value into
9031   // a vector value, as that happens iff all users use the vector value.
9032   for (VPValue *Op : Recipe->operands()) {
9033     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
9034     if (!PredR)
9035       continue;
9036     auto *RepR =
9037         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
9038     assert(RepR->isPredicated() &&
9039            "expected Replicate recipe to be predicated");
9040     RepR->setAlsoPack(false);
9041   }
9042 
9043   // Finalize the recipe for Instr, first if it is not predicated.
9044   if (!IsPredicated) {
9045     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
9046     VPBB->appendRecipe(Recipe);
9047     return VPBB;
9048   }
9049   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
9050   assert(VPBB->getSuccessors().empty() &&
9051          "VPBB has successors when handling predicated replication.");
9052   // Record predicated instructions for above packing optimizations.
9053   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
9054   VPBlockUtils::insertBlockAfter(Region, VPBB);
9055   auto *RegSucc = new VPBasicBlock();
9056   VPBlockUtils::insertBlockAfter(RegSucc, Region);
9057   return RegSucc;
9058 }
9059 
9060 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
9061                                                       VPRecipeBase *PredRecipe,
9062                                                       VPlanPtr &Plan) {
9063   // Instructions marked for predication are replicated and placed under an
9064   // if-then construct to prevent side-effects.
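  // The resulting single-entry single-exit region looks roughly like:
  //
  //   pred.<opcode>.entry          (BranchOnMask BlockInMask)
  //        |              \
  //   pred.<opcode>.if     |       (the replicated instruction)
  //        |              /
  //   pred.<opcode>.continue       (PredInstPHI, if Instr produces a value)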
9065 
9066   // Generate recipes to compute the block mask for this region.
9067   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
9068 
9069   // Build the triangular if-then region.
9070   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
9071   assert(Instr->getParent() && "Predicated instruction not in any basic block");
9072   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
9073   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
9074   auto *PHIRecipe = Instr->getType()->isVoidTy()
9075                         ? nullptr
9076                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
9077   if (PHIRecipe) {
9078     Plan->removeVPValueFor(Instr);
9079     Plan->addVPValue(Instr, PHIRecipe);
9080   }
9081   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
9082   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
9083   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
9084 
9085   // Note: first set Entry as region entry and then connect successors starting
9086   // from it in order, to propagate the "parent" of each VPBasicBlock.
9087   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
9088   VPBlockUtils::connectBlocks(Pred, Exit);
9089 
9090   return Region;
9091 }
9092 
9093 VPRecipeOrVPValueTy
9094 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
9095                                         ArrayRef<VPValue *> Operands,
9096                                         VFRange &Range, VPlanPtr &Plan) {
9097   // First, check for specific widening recipes that deal with calls, memory
9098   // operations, inductions and Phi nodes.
9099   if (auto *CI = dyn_cast<CallInst>(Instr))
9100     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
9101 
9102   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
9103     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
9104 
9105   VPRecipeBase *Recipe;
9106   if (auto Phi = dyn_cast<PHINode>(Instr)) {
9107     if (Phi->getParent() != OrigLoop->getHeader())
9108       return tryToBlend(Phi, Operands, Plan);
9109     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
9110       return toVPRecipeResult(Recipe);
9111 
9112     VPWidenPHIRecipe *PhiRecipe = nullptr;
9113     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
9114       VPValue *StartV = Operands[0];
9115       if (Legal->isReductionVariable(Phi)) {
9116         RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9117         assert(RdxDesc.getRecurrenceStartValue() ==
9118                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
9119         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
9120                                              CM.isInLoopReduction(Phi),
9121                                              CM.useOrderedReductions(RdxDesc));
9122       } else {
9123         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
9124       }
9125 
9126       // Record the incoming value from the backedge, so we can add the incoming
9127       // value from the backedge after all recipes have been created.
9128       recordRecipeOf(cast<Instruction>(
9129           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
9130       PhisToFix.push_back(PhiRecipe);
9131     } else {
9132       // TODO: record start and backedge value for remaining pointer induction
9133       // phis.
9134       assert(Phi->getType()->isPointerTy() &&
9135              "only pointer phis should be handled here");
9136       PhiRecipe = new VPWidenPHIRecipe(Phi);
9137     }
9138 
9139     return toVPRecipeResult(PhiRecipe);
9140   }
9141 
9142   if (isa<TruncInst>(Instr) &&
9143       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
9144                                                Range, *Plan)))
9145     return toVPRecipeResult(Recipe);
9146 
9147   if (!shouldWiden(Instr, Range))
9148     return nullptr;
9149 
9150   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
9151     return toVPRecipeResult(new VPWidenGEPRecipe(
9152         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
9153 
9154   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
9155     bool InvariantCond =
9156         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
9157     return toVPRecipeResult(new VPWidenSelectRecipe(
9158         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
9159   }
9160 
9161   return toVPRecipeResult(tryToWiden(Instr, Operands));
9162 }
9163 
9164 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9165                                                         ElementCount MaxVF) {
9166   assert(OrigLoop->isInnermost() && "Inner loop expected.");
9167 
9168   // Collect instructions from the original loop that will become trivially dead
9169   // in the vectorized loop. We don't need to vectorize these instructions. For
9170   // example, original induction update instructions can become dead because we
9171   // separately emit induction "steps" when generating code for the new loop.
9172   // Similarly, we create a new latch condition when setting up the structure
9173   // of the new loop, so the old one can become dead.
9174   SmallPtrSet<Instruction *, 4> DeadInstructions;
9175   collectTriviallyDeadInstructions(DeadInstructions);
9176 
9177   // Add assume instructions we need to drop to DeadInstructions, to prevent
9178   // them from being added to the VPlan.
9179   // TODO: We only need to drop assumes in blocks that get flattened. If the
9180   // control flow is preserved, we should keep them.
9181   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9182   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9183 
9184   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9185   // Dead instructions do not need sinking. Remove them from SinkAfter.
9186   for (Instruction *I : DeadInstructions)
9187     SinkAfter.erase(I);
9188 
9189   // Cannot sink instructions after dead instructions (there won't be any
9190   // recipes for them). Instead, find the first non-dead previous instruction.
9191   for (auto &P : Legal->getSinkAfter()) {
9192     Instruction *SinkTarget = P.second;
9193     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
9194     (void)FirstInst;
9195     while (DeadInstructions.contains(SinkTarget)) {
9196       assert(
9197           SinkTarget != FirstInst &&
9198           "Must find a live instruction (at least the one feeding the "
9199           "first-order recurrence PHI) before reaching beginning of the block");
9200       SinkTarget = SinkTarget->getPrevNode();
9201       assert(SinkTarget != P.first &&
9202              "sink source equals target, no sinking required");
9203     }
9204     P.second = SinkTarget;
9205   }
9206 
9207   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9208   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9209     VFRange SubRange = {VF, MaxVFPlusOne};
9210     VPlans.push_back(
9211         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9212     VF = SubRange.End;
9213   }
9214 }
9215 
9216 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9217     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9218     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9219 
9220   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9221 
9222   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9223 
9224   // ---------------------------------------------------------------------------
9225   // Pre-construction: record ingredients whose recipes we'll need to further
9226   // process after constructing the initial VPlan.
9227   // ---------------------------------------------------------------------------
9228 
9229   // Mark instructions we'll need to sink later and their targets as
9230   // ingredients whose recipe we'll need to record.
9231   for (auto &Entry : SinkAfter) {
9232     RecipeBuilder.recordRecipeOf(Entry.first);
9233     RecipeBuilder.recordRecipeOf(Entry.second);
9234   }
9235   for (auto &Reduction : CM.getInLoopReductionChains()) {
9236     PHINode *Phi = Reduction.first;
9237     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9238     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9239 
9240     RecipeBuilder.recordRecipeOf(Phi);
9241     for (auto &R : ReductionOperations) {
9242       RecipeBuilder.recordRecipeOf(R);
9243       // For min/max reductions, where we have a pair of icmp/select, we also
9244       // need to record the ICmp recipe, so it can be removed later.
9245       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9246         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9247     }
9248   }
9249 
9250   // For each interleave group which is relevant for this (possibly trimmed)
9251   // Range, add it to the set of groups to be later applied to the VPlan and add
9252   // placeholders for its members' Recipes which we'll be replacing with a
9253   // single VPInterleaveRecipe.
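  // For example (hypothetical), a factor-2 group of loads {A[2*i], A[2*i+1]}
  // ends up as a single wide load feeding de-interleaving shuffles, all
  // emitted by the one VPInterleaveRecipe at the group's insertion position.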
9254   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9255     auto applyIG = [IG, this](ElementCount VF) -> bool {
9256       return (VF.isVector() && // Query is illegal for VF == 1
9257               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9258                   LoopVectorizationCostModel::CM_Interleave);
9259     };
9260     if (!getDecisionAndClampRange(applyIG, Range))
9261       continue;
9262     InterleaveGroups.insert(IG);
9263     for (unsigned i = 0; i < IG->getFactor(); i++)
9264       if (Instruction *Member = IG->getMember(i))
9265         RecipeBuilder.recordRecipeOf(Member);
9266   }
9267 
9268   // ---------------------------------------------------------------------------
9269   // Build initial VPlan: Scan the body of the loop in a topological order to
9270   // visit each basic block after having visited its predecessor basic blocks.
9271   // ---------------------------------------------------------------------------
9272 
9273   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9274   auto Plan = std::make_unique<VPlan>();
9275   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9276   Plan->setEntry(VPBB);
9277 
9278   // Scan the body of the loop in a topological order to visit each basic block
9279   // after having visited its predecessor basic blocks.
9280   LoopBlocksDFS DFS(OrigLoop);
9281   DFS.perform(LI);
9282 
9283   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9284     // Relevant instructions from basic block BB will be grouped into VPRecipe
9285     // ingredients and fill a new VPBasicBlock.
9286     unsigned VPBBsForBB = 0;
9287     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9288     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9289     VPBB = FirstVPBBForBB;
9290     Builder.setInsertPoint(VPBB);
9291 
9292     // Introduce each ingredient into VPlan.
9293     // TODO: Model and preserve debug intrinsics in VPlan.
9294     for (Instruction &I : BB->instructionsWithoutDebug()) {
9295       Instruction *Instr = &I;
9296 
9297       // First filter out irrelevant instructions, to ensure no recipes are
9298       // built for them.
9299       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9300         continue;
9301 
9302       SmallVector<VPValue *, 4> Operands;
9303       auto *Phi = dyn_cast<PHINode>(Instr);
9304       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9305         Operands.push_back(Plan->getOrAddVPValue(
9306             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9307       } else {
9308         auto OpRange = Plan->mapToVPValues(Instr->operands());
9309         Operands = {OpRange.begin(), OpRange.end()};
9310       }
9311       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9312               Instr, Operands, Range, Plan)) {
9313         // If Instr can be simplified to an existing VPValue, use it.
9314         if (RecipeOrValue.is<VPValue *>()) {
9315           auto *VPV = RecipeOrValue.get<VPValue *>();
9316           Plan->addVPValue(Instr, VPV);
9317           // If the re-used value is a recipe, register the recipe for the
9318           // instruction, in case the recipe for Instr needs to be recorded.
9319           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9320             RecipeBuilder.setRecipe(Instr, R);
9321           continue;
9322         }
9323         // Otherwise, add the new recipe.
9324         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9325         for (auto *Def : Recipe->definedValues()) {
9326           auto *UV = Def->getUnderlyingValue();
9327           Plan->addVPValue(UV, Def);
9328         }
9329 
9330         RecipeBuilder.setRecipe(Instr, Recipe);
9331         VPBB->appendRecipe(Recipe);
9332         continue;
9333       }
9334 
9335       // Otherwise, if all widening options failed, Instruction is to be
9336       // replicated. This may create a successor for VPBB.
9337       VPBasicBlock *NextVPBB =
9338           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9339       if (NextVPBB != VPBB) {
9340         VPBB = NextVPBB;
9341         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9342                                     : "");
9343       }
9344     }
9345   }
9346 
9347   RecipeBuilder.fixHeaderPhis();
9348 
9349   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
9350   // may also be empty, such as the last one (VPBB), reflecting original
9351   // basic blocks with no recipes.
9352   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9353   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9354   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9355   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9356   delete PreEntry;
9357 
9358   // ---------------------------------------------------------------------------
9359   // Transform initial VPlan: Apply previously taken decisions, in order, to
9360   // bring the VPlan to its final state.
9361   // ---------------------------------------------------------------------------
9362 
9363   // Apply Sink-After legal constraints.
9364   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9365     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9366     if (Region && Region->isReplicator()) {
9367       assert(Region->getNumSuccessors() == 1 &&
9368              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9369       assert(R->getParent()->size() == 1 &&
9370              "A recipe in an original replicator region must be the only "
9371              "recipe in its block");
9372       return Region;
9373     }
9374     return nullptr;
9375   };
9376   for (auto &Entry : SinkAfter) {
9377     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9378     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9379 
9380     auto *TargetRegion = GetReplicateRegion(Target);
9381     auto *SinkRegion = GetReplicateRegion(Sink);
9382     if (!SinkRegion) {
9383       // If the recipe being sunk is not in a replicate region, sink it directly.
9384       if (TargetRegion) {
9385         // The target is in a replication region, make sure to move Sink to
9386         // the block after it, not into the replication region itself.
9387         VPBasicBlock *NextBlock =
9388             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9389         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9390       } else
9391         Sink->moveAfter(Target);
9392       continue;
9393     }
9394 
9395     // The sink source is in a replicate region. Unhook the region from the CFG.
9396     auto *SinkPred = SinkRegion->getSinglePredecessor();
9397     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9398     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9399     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9400     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9401 
9402     if (TargetRegion) {
9403       // The target recipe is also in a replicate region, move the sink region
9404       // after the target region.
9405       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9406       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9407       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9408       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9409     } else {
9410       // The sink source is in a replicate region; we need to move the whole
9411       // replicate region, which should only contain a single recipe in the
9412       // main block.
9413       auto *SplitBlock =
9414           Target->getParent()->splitAt(std::next(Target->getIterator()));
9415 
9416       auto *SplitPred = SplitBlock->getSinglePredecessor();
9417 
9418       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9419       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9420       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9421       if (VPBB == SplitPred)
9422         VPBB = SplitBlock;
9423     }
9424   }
9425 
9426   // Introduce a recipe to combine the incoming and previous values of a
9427   // first-order recurrence.
9428   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9429     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9430     if (!RecurPhi)
9431       continue;
9432 
9433     auto *RecurSplice = cast<VPInstruction>(
9434         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9435                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9436 
9437     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9438     if (auto *Region = GetReplicateRegion(PrevRecipe)) {
9439       VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor());
9440       RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi());
9441     } else
9442       RecurSplice->moveAfter(PrevRecipe);
9443     RecurPhi->replaceAllUsesWith(RecurSplice);
9444     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9445     // all users.
9446     RecurSplice->setOperand(0, RecurPhi);
9447   }
9448 
9449   // Interleave memory: for each Interleave Group we marked earlier as relevant
9450   // for this VPlan, replace the Recipes widening its memory instructions with a
9451   // single VPInterleaveRecipe at its insertion point.
9452   for (auto IG : InterleaveGroups) {
9453     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9454         RecipeBuilder.getRecipe(IG->getInsertPos()));
9455     SmallVector<VPValue *, 4> StoredValues;
9456     for (unsigned i = 0; i < IG->getFactor(); ++i)
9457       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9458         auto *StoreR =
9459             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9460         StoredValues.push_back(StoreR->getStoredValue());
9461       }
9462 
9463     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9464                                         Recipe->getMask());
9465     VPIG->insertBefore(Recipe);
9466     unsigned J = 0;
9467     for (unsigned i = 0; i < IG->getFactor(); ++i)
9468       if (Instruction *Member = IG->getMember(i)) {
9469         if (!Member->getType()->isVoidTy()) {
9470           VPValue *OriginalV = Plan->getVPValue(Member);
9471           Plan->removeVPValueFor(Member);
9472           Plan->addVPValue(Member, VPIG->getVPValue(J));
9473           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9474           J++;
9475         }
9476         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9477       }
9478   }
9479 
9480   // Adjust the recipes for any inloop reductions.
9481   adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start);
9482 
9483   VPlanTransforms::sinkScalarOperands(*Plan);
9484   VPlanTransforms::mergeReplicateRegions(*Plan);
9485 
9486   std::string PlanName;
9487   raw_string_ostream RSO(PlanName);
9488   ElementCount VF = Range.Start;
9489   Plan->addVF(VF);
9490   RSO << "Initial VPlan for VF={" << VF;
9491   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9492     Plan->addVF(VF);
9493     RSO << "," << VF;
9494   }
9495   RSO << "},UF>=1";
9496   RSO.flush();
9497   Plan->setName(PlanName);
9498 
9499   return Plan;
9500 }
9501 
9502 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9503   // Outer loop handling: outer loops may require CFG and instruction level
9504   // transformations before we can even evaluate whether vectorization is
9505   // profitable. Since we cannot modify the incoming IR, we need to build
9506   // VPlan upfront in the vectorization pipeline.
9507   assert(!OrigLoop->isInnermost());
9508   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9509 
9510   // Create new empty VPlan
9511   auto Plan = std::make_unique<VPlan>();
9512 
9513   // Build hierarchical CFG
9514   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9515   HCFGBuilder.buildHierarchicalCFG();
9516 
9517   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9518        VF *= 2)
9519     Plan->addVF(VF);
9520 
9521   if (EnableVPlanPredication) {
9522     VPlanPredicator VPP(*Plan);
9523     VPP.predicate();
9524 
9525     // Avoid running transformation to recipes until masked code generation in
9526     // VPlan-native path is in place.
9527     return Plan;
9528   }
9529 
9530   SmallPtrSet<Instruction *, 1> DeadInstructions;
9531   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9532                                              Legal->getInductionVars(),
9533                                              DeadInstructions, *PSE.getSE());
9534   return Plan;
9535 }
9536 
9537 // Adjust the recipes for reductions. For in-loop reductions the chain of
9538 // instructions leading from the loop exit instr to the phi needs to be
9539 // converted to reductions, with one operand being vector and the other being
9540 // the scalar reduction chain. For other reductions, a select is introduced
9541 // between the phi and live-out recipes when folding the tail.
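// For example (hypothetical), for an in-loop integer add reduction
//   %sum.next = add i32 %sum, %val
// the VPWidenRecipe for the add is replaced by a VPReductionRecipe whose
// operands are the scalar chain (%sum) and the vector operand (%val), and
// which accumulates a vector reduction of %val into the scalar chain.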
9542 void LoopVectorizationPlanner::adjustRecipesForReductions(
9543     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9544     ElementCount MinVF) {
9545   for (auto &Reduction : CM.getInLoopReductionChains()) {
9546     PHINode *Phi = Reduction.first;
9547     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9548     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9549 
9550     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9551       continue;
9552 
9553     // ReductionOperations are ordered top-down from the phi's use to the
9554     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9555     // which of the two operands will remain scalar and which will be reduced.
9556     // For minmax the chain will be the select instructions.
9557     Instruction *Chain = Phi;
9558     for (Instruction *R : ReductionOperations) {
9559       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9560       RecurKind Kind = RdxDesc.getRecurrenceKind();
9561 
9562       VPValue *ChainOp = Plan->getVPValue(Chain);
9563       unsigned FirstOpId;
9564       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9565         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9566                "Expected to replace a VPWidenSelectSC");
9567         FirstOpId = 1;
9568       } else {
9569         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) &&
9570                "Expected to replace a VPWidenSC");
9571         FirstOpId = 0;
9572       }
9573       unsigned VecOpId =
9574           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9575       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9576 
9577       auto *CondOp = CM.foldTailByMasking()
9578                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9579                          : nullptr;
9580       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9581           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9582       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9583       Plan->removeVPValueFor(R);
9584       Plan->addVPValue(R, RedRecipe);
9585       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9587       WidenRecipe->eraseFromParent();
9588 
9589       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9590         VPRecipeBase *CompareRecipe =
9591             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9592         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9593                "Expected to replace a VPWidenSC");
9594         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9595                "Expected no remaining users");
9596         CompareRecipe->eraseFromParent();
9597       }
9598       Chain = R;
9599     }
9600   }
9601 
9602   // If tail is folded by masking, introduce selects between the phi
9603   // and the live-out instruction of each reduction, at the end of the latch.
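  // For example (illustrative), a reduction that is not in-loop gets
  //   %red.result = select <header mask>, %red.next, %red.phi
  // appended to the latch, so lanes masked off by the folded tail keep the
  // value carried by the phi instead of the freshly computed one.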
9604   if (CM.foldTailByMasking()) {
9605     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9606       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9607       if (!PhiR || PhiR->isInLoop())
9608         continue;
9609       Builder.setInsertPoint(LatchVPBB);
9610       VPValue *Cond =
9611           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9612       VPValue *Red = PhiR->getBackedgeValue();
9613       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9614     }
9615   }
9616 }
9617 
9618 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
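// Illustrative output of the printer below for a factor-2 load group (the
// value names are hypothetical):
//   INTERLEAVE-GROUP with factor 2 at %gep, vp<%addr>
//     vp<%lo> = load from index 0
//     vp<%hi> = load from index 1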
9619 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9620                                VPSlotTracker &SlotTracker) const {
9621   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9622   IG->getInsertPos()->printAsOperand(O, false);
9623   O << ", ";
9624   getAddr()->printAsOperand(O, SlotTracker);
9625   VPValue *Mask = getMask();
9626   if (Mask) {
9627     O << ", ";
9628     Mask->printAsOperand(O, SlotTracker);
9629   }
9630 
9631   unsigned OpIdx = 0;
9632   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9633     if (!IG->getMember(i))
9634       continue;
9635     if (getNumStoreOperands() > 0) {
9636       O << "\n" << Indent << "  store ";
9637       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9638       O << " to index " << i;
9639     } else {
9640       O << "\n" << Indent << "  ";
9641       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9642       O << " = load from index " << i;
9643     }
9644     ++OpIdx;
9645   }
9646 }
9647 #endif
9648 
9649 void VPWidenCallRecipe::execute(VPTransformState &State) {
9650   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9651                                   *this, State);
9652 }
9653 
9654 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9655   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9656                                     this, *this, InvariantCond, State);
9657 }
9658 
9659 void VPWidenRecipe::execute(VPTransformState &State) {
9660   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9661 }
9662 
9663 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9664   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9665                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9666                       IsIndexLoopInvariant, State);
9667 }
9668 
9669 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9670   assert(!State.Instance && "Int or FP induction being replicated.");
9671   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9672                                    getTruncInst(), getVPValue(0),
9673                                    getCastValue(), State);
9674 }
9675 
9676 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9677   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9678                                  State);
9679 }
9680 
9681 void VPBlendRecipe::execute(VPTransformState &State) {
9682   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9683   // We know that all PHIs in non-header blocks are converted into
9684   // selects, so we don't have to worry about the insertion order and we
9685   // can just use the builder.
9686   // At this point we generate the predication tree. There may be
9687   // duplications since this is a simple recursive scan, but future
9688   // optimizations will clean it up.
9689 
9690   unsigned NumIncoming = getNumIncomingValues();
9691 
9692   // Generate a sequence of selects of the form:
9693   // SELECT(Mask3, In3,
9694   //        SELECT(Mask2, In2,
9695   //               SELECT(Mask1, In1,
9696   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9699   InnerLoopVectorizer::VectorParts Entry(State.UF);
9700   for (unsigned In = 0; In < NumIncoming; ++In) {
9701     for (unsigned Part = 0; Part < State.UF; ++Part) {
9702       // We might have single edge PHIs (blocks) - use an identity
9703       // 'select' for the first PHI operand.
9704       Value *In0 = State.get(getIncomingValue(In), Part);
9705       if (In == 0)
9706         Entry[Part] = In0; // Initialize with the first incoming value.
9707       else {
9708         // Select between the current value and the previous incoming edge
9709         // based on the incoming mask.
9710         Value *Cond = State.get(getMask(In), Part);
9711         Entry[Part] =
9712             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9713       }
9714     }
9715   }
9716   for (unsigned Part = 0; Part < State.UF; ++Part)
9717     State.set(this, Entry[Part], Part);
9718 }
9719 
9720 void VPInterleaveRecipe::execute(VPTransformState &State) {
9721   assert(!State.Instance && "Interleave group being replicated.");
9722   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9723                                       getStoredValues(), getMask());
9724 }
9725 
9726 void VPReductionRecipe::execute(VPTransformState &State) {
9727   assert(!State.Instance && "Reduction being replicated.");
9728   Value *PrevInChain = State.get(getChainOp(), 0);
9729   for (unsigned Part = 0; Part < State.UF; ++Part) {
9730     RecurKind Kind = RdxDesc->getRecurrenceKind();
9731     bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9732     Value *NewVecOp = State.get(getVecOp(), Part);
9733     if (VPValue *Cond = getCondOp()) {
9734       Value *NewCond = State.get(Cond, Part);
9735       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9736       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9737           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9738       Constant *IdenVec =
9739           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9740       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9741       NewVecOp = Select;
9742     }
9743     Value *NewRed;
9744     Value *NextInChain;
9745     if (IsOrdered) {
9746       if (State.VF.isVector())
9747         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9748                                         PrevInChain);
9749       else
9750         NewRed = State.Builder.CreateBinOp(
9751             (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(),
9752             PrevInChain, NewVecOp);
9753       PrevInChain = NewRed;
9754     } else {
9755       PrevInChain = State.get(getChainOp(), Part);
9756       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9757     }
9758     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9759       NextInChain =
9760           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9761                          NewRed, PrevInChain);
9762     } else if (IsOrdered)
9763       NextInChain = NewRed;
9764     else {
9765       NextInChain = State.Builder.CreateBinOp(
9766           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9767           PrevInChain);
9768     }
9769     State.set(this, NextInChain, Part);
9770   }
9771 }
9772 
9773 void VPReplicateRecipe::execute(VPTransformState &State) {
9774   if (State.Instance) { // Generate a single instance.
9775     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9776     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9777                                     *State.Instance, IsPredicated, State);
9778     // Insert scalar instance packing it into a vector.
9779     if (AlsoPack && State.VF.isVector()) {
9780       // If we're constructing lane 0, initialize to start from poison.
9781       if (State.Instance->Lane.isFirstLane()) {
9782         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9783         Value *Poison = PoisonValue::get(
9784             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9785         State.set(this, Poison, State.Instance->Part);
9786       }
9787       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9788     }
9789     return;
9790   }
9791 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9795   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9796   assert((!State.VF.isScalable() || IsUniform) &&
9797          "Can't scalarize a scalable vector");
9798   for (unsigned Part = 0; Part < State.UF; ++Part)
9799     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9800       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9801                                       VPIteration(Part, Lane), IsPredicated,
9802                                       State);
9803 }
9804 
9805 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9806   assert(State.Instance && "Branch on Mask works only on single instance.");
9807 
9808   unsigned Part = State.Instance->Part;
9809   unsigned Lane = State.Instance->Lane.getKnownLane();
9810 
9811   Value *ConditionBit = nullptr;
9812   VPValue *BlockInMask = getMask();
9813   if (BlockInMask) {
9814     ConditionBit = State.get(BlockInMask, Part);
9815     if (ConditionBit->getType()->isVectorTy())
9816       ConditionBit = State.Builder.CreateExtractElement(
9817           ConditionBit, State.Builder.getInt32(Lane));
9818   } else // Block in mask is all-one.
9819     ConditionBit = State.Builder.getTrue();
9820 
9821   // Replace the temporary unreachable terminator with a new conditional branch,
9822   // whose two destinations will be set later when they are created.
9823   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9824   assert(isa<UnreachableInst>(CurrentTerminator) &&
9825          "Expected to replace unreachable terminator with conditional branch.");
9826   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9827   CondBr->setSuccessor(0, nullptr);
9828   ReplaceInstWithInst(CurrentTerminator, CondBr);
9829 }
9830 
9831 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9832   assert(State.Instance && "Predicated instruction PHI works per instance.");
9833   Instruction *ScalarPredInst =
9834       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9835   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9836   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9837   assert(PredicatingBB && "Predicated block has no single predecessor.");
9838   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9839          "operand must be VPReplicateRecipe");
9840 
9841   // By current pack/unpack logic we need to generate only a single phi node: if
9842   // a vector value for the predicated instruction exists at this point it means
9843   // the instruction has vector users only, and a phi for the vector value is
9844   // needed. In this case the recipe of the predicated instruction is marked to
9845   // also do that packing, thereby "hoisting" the insert-element sequence.
9846   // Otherwise, a phi node for the scalar value is needed.
9847   unsigned Part = State.Instance->Part;
9848   if (State.hasVectorValue(getOperand(0), Part)) {
9849     Value *VectorValue = State.get(getOperand(0), Part);
9850     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9851     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9852     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9853     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9854     if (State.hasVectorValue(this, Part))
9855       State.reset(this, VPhi, Part);
9856     else
9857       State.set(this, VPhi, Part);
9858     // NOTE: Currently we need to update the value of the operand, so the next
9859     // predicated iteration inserts its generated value in the correct vector.
9860     State.reset(getOperand(0), VPhi, Part);
9861   } else {
9862     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9863     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9864     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9865                      PredicatingBB);
9866     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9867     if (State.hasScalarValue(this, *State.Instance))
9868       State.reset(this, Phi, *State.Instance);
9869     else
9870       State.set(this, Phi, *State.Instance);
9871     // NOTE: Currently we need to update the value of the operand, so the next
9872     // predicated iteration inserts its generated value in the correct vector.
9873     State.reset(getOperand(0), Phi, *State.Instance);
9874   }
9875 }
9876 
9877 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9878   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9879   State.ILV->vectorizeMemoryInstruction(
9880       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9881       StoredValue, getMask());
9882 }
9883 
9884 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9885 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9886 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9887 // for predication.
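// For example (illustrative): with '#pragma clang loop vectorize_predicate(enable)'
// and neither OptSize nor -prefer-predicate-over-epilogue in effect, step 3
// below returns CM_ScalarEpilogueNotNeededUsePredicate; building with -Os
// makes step 1 return CM_ScalarEpilogueNotAllowedOptSize.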
9888 static ScalarEpilogueLowering getScalarEpilogueLowering(
9889     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9890     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9891     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9892     LoopVectorizationLegality &LVL) {
9893   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9894   // don't look at hints or options, and don't request a scalar epilogue.
9895   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9896   // LoopAccessInfo (due to code dependency and not being able to reliably get
9897   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9898   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9899   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9900   // back to the old way and vectorize with versioning when forced. See D81345.)
9901   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9902                                                       PGSOQueryType::IRPass) &&
9903                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9904     return CM_ScalarEpilogueNotAllowedOptSize;
9905 
9906   // 2) If set, obey the directives
9907   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9908     switch (PreferPredicateOverEpilogue) {
9909     case PreferPredicateTy::ScalarEpilogue:
9910       return CM_ScalarEpilogueAllowed;
9911     case PreferPredicateTy::PredicateElseScalarEpilogue:
9912       return CM_ScalarEpilogueNotNeededUsePredicate;
9913     case PreferPredicateTy::PredicateOrDontVectorize:
9914       return CM_ScalarEpilogueNotAllowedUsePredicate;
9915     };
9916   }
9917 
9918   // 3) If set, obey the hints
9919   switch (Hints.getPredicate()) {
9920   case LoopVectorizeHints::FK_Enabled:
9921     return CM_ScalarEpilogueNotNeededUsePredicate;
9922   case LoopVectorizeHints::FK_Disabled:
9923     return CM_ScalarEpilogueAllowed;
9924   };
9925 
9926   // 4) if the TTI hook indicates this is profitable, request predication.
9927   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9928                                        LVL.getLAI()))
9929     return CM_ScalarEpilogueNotNeededUsePredicate;
9930 
9931   return CM_ScalarEpilogueAllowed;
9932 }
9933 
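// Get the generated vector Value for \p Def in \p Part. If only scalar values
// are available, the vector is constructed on demand: a broadcast of the
// lane-0 scalar for uniform or live-in defs, or an insertelement sequence
// packing all lanes otherwise.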
9934 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
9935   // If Values have been set for this Def return the one relevant for \p Part.
9936   if (hasVectorValue(Def, Part))
9937     return Data.PerPartOutput[Def][Part];
9938 
9939   if (!hasScalarValue(Def, {Part, 0})) {
9940     Value *IRV = Def->getLiveInIRValue();
9941     Value *B = ILV->getBroadcastInstrs(IRV);
9942     set(Def, B, Part);
9943     return B;
9944   }
9945 
9946   Value *ScalarValue = get(Def, {Part, 0});
9947   // If we aren't vectorizing, we can just copy the scalar map values over
9948   // to the vector map.
9949   if (VF.isScalar()) {
9950     set(Def, ScalarValue, Part);
9951     return ScalarValue;
9952   }
9953 
9954   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9955   bool IsUniform = RepR && RepR->isUniform();
9956 
9957   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9958   // Check if there is a scalar value for the selected lane.
9959   if (!hasScalarValue(Def, {Part, LastLane})) {
9960     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9961     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9962            "unexpected recipe found to be invariant");
9963     IsUniform = true;
9964     LastLane = 0;
9965   }
9966 
9967   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9968   // Set the insert point after the last scalarized instruction or after the
9969   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
9970   // will directly follow the scalar definitions.
9971   auto OldIP = Builder.saveIP();
9972   auto NewIP =
9973       isa<PHINode>(LastInst)
9974           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
9975           : std::next(BasicBlock::iterator(LastInst));
9976   Builder.SetInsertPoint(&*NewIP);
9977 
9978   // However, if we are vectorizing, we need to construct the vector values.
9979   // If the value is known to be uniform after vectorization, we can just
9980   // broadcast the scalar value corresponding to lane zero for each unroll
9981   // iteration. Otherwise, we construct the vector values using
9982   // insertelement instructions. Since the resulting vectors are stored in
9983   // State, we will only generate the insertelements once.
9984   Value *VectorValue = nullptr;
9985   if (IsUniform) {
9986     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9987     set(Def, VectorValue, Part);
9988   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9993     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9994       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9995     VectorValue = get(Def, Part);
9996   }
9997   Builder.restoreIP(OldIP);
9998   return VectorValue;
9999 }
10000 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows VPlan-to-VPlan
// transformations to be applied from the very beginning without modifying the
// input LLVM IR.
10005 static bool processLoopInVPlanNativePath(
10006     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10007     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10008     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10009     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10010     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10011     LoopVectorizationRequirements &Requirements) {
10012 
10013   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10014     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10015     return false;
10016   }
10017   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10018   Function *F = L->getHeader()->getParent();
10019   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10020 
10021   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10022       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10023 
10024   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10025                                 &Hints, IAI);
10026   // Use the planner for outer loop vectorization.
10027   // TODO: CM is not used at this point inside the planner. Turn CM into an
10028   // optional argument if we don't need it in the future.
10029   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10030                                Requirements, ORE);
10031 
10032   // Get user vectorization factor.
10033   ElementCount UserVF = Hints.getWidth();
10034 
10035   CM.collectElementTypesForWidening();
10036 
10037   // Plan how to best vectorize, return the best VF and its cost.
10038   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10039 
10040   // If we are stress testing VPlan builds, do not attempt to generate vector
10041   // code. Masked vector code generation support will follow soon.
10042   // Also, do not attempt to vectorize if no vector code will be produced.
10043   if (VPlanBuildStressTest || EnableVPlanPredication ||
10044       VectorizationFactor::Disabled() == VF)
10045     return false;
10046 
10047   LVP.setBestPlan(VF.Width, 1);
10048 
10049   {
10050     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10051                              F->getParent()->getDataLayout());
10052     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10053                            &CM, BFI, PSI, Checks);
10054     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10055                       << L->getHeader()->getParent()->getName() << "\"\n");
10056     LVP.executePlan(LB, DT);
10057   }
10058 
10059   // Mark the loop as already vectorized to avoid vectorizing again.
10060   Hints.setAlreadyVectorized();
10061   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10062   return true;
10063 }
10064 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision there
// will be a performance penalty from the conversion overhead and the change in
// the vector width.
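// For example (illustrative):
//   void f(float *A, float X, double Y, int N) {
//     for (int I = 0; I < N; ++I)
//       A[I] = X * Y;   // X is fpext'ed to double; the product is truncated
//                       // back to float for the store, triggering the remark.
//   }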
10069 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10070   SmallVector<Instruction *, 4> Worklist;
10071   for (BasicBlock *BB : L->getBlocks()) {
10072     for (Instruction &Inst : *BB) {
10073       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10074         if (S->getValueOperand()->getType()->isFloatTy())
10075           Worklist.push_back(S);
10076       }
10077     }
10078   }
10079 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
10082   SmallPtrSet<const Instruction *, 4> Visited;
10083   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10084   while (!Worklist.empty()) {
10085     auto *I = Worklist.pop_back_val();
10086     if (!L->contains(I))
10087       continue;
10088     if (!Visited.insert(I).second)
10089       continue;
10090 
10091     // Emit a remark if the floating point store required a floating
10092     // point conversion.
10093     // TODO: More work could be done to identify the root cause such as a
10094     // constant or a function return type and point the user to it.
10095     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10096       ORE->emit([&]() {
10097         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10098                                           I->getDebugLoc(), L->getHeader())
10099                << "floating point conversion changes vector width. "
10100                << "Mixed floating point precision requires an up/down "
10101                << "cast that will negatively impact performance.";
10102       });
10103 
10104     for (Use &Op : I->operands())
10105       if (auto *OpI = dyn_cast<Instruction>(Op))
10106         Worklist.push_back(OpI);
10107   }
10108 }
10109 
10110 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10111     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10112                                !EnableLoopInterleaving),
10113       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10114                               !EnableLoopVectorization) {}
10115 
10116 bool LoopVectorizePass::processLoop(Loop *L) {
10117   assert((EnableVPlanNativePath || L->isInnermost()) &&
10118          "VPlan-native path is not enabled. Only process inner loops.");
10119 
10120 #ifndef NDEBUG
10121   const std::string DebugLocStr = getDebugLocString(L);
10122 #endif /* NDEBUG */
10123 
10124   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10125                     << L->getHeader()->getParent()->getName() << "\" from "
10126                     << DebugLocStr << "\n");
10127 
10128   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
10129 
10130   LLVM_DEBUG(
10131       dbgs() << "LV: Loop hints:"
10132              << " force="
10133              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10134                      ? "disabled"
10135                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10136                             ? "enabled"
10137                             : "?"))
10138              << " width=" << Hints.getWidth()
10139              << " interleave=" << Hints.getInterleave() << "\n");
10140 
10141   // Function containing loop
10142   Function *F = L->getHeader()->getParent();
10143 
10144   // Looking at the diagnostic output is the only way to determine if a loop
10145   // was vectorized (other than looking at the IR or machine code), so it
10146   // is important to generate an optimization remark for each loop. Most of
10147   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10148   // generated as OptimizationRemark and OptimizationRemarkMissed are
10149   // less verbose reporting vectorized loops and unvectorized loops that may
10150   // benefit from vectorization, respectively.
10151 
10152   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10153     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10154     return false;
10155   }
10156 
10157   PredicatedScalarEvolution PSE(*SE, *L);
10158 
10159   // Check if it is legal to vectorize the loop.
10160   LoopVectorizationRequirements Requirements;
10161   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10162                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10163   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10164     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10165     Hints.emitRemarkWithHints();
10166     return false;
10167   }
10168 
10169   // Check the function attributes and profiles to find out if this function
10170   // should be optimized for size.
10171   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10172       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10173 
10174   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10175   // here. They may require CFG and instruction level transformations before
10176   // even evaluating whether vectorization is profitable. Since we cannot modify
10177   // the incoming IR, we need to build VPlan upfront in the vectorization
10178   // pipeline.
10179   if (!L->isInnermost())
10180     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10181                                         ORE, BFI, PSI, Hints, Requirements);
10182 
10183   assert(L->isInnermost() && "Inner loop expected.");
10184 
10185   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10186   // count by optimizing for size, to minimize overheads.
10187   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10188   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10189     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10190                       << "This loop is worth vectorizing only if no scalar "
10191                       << "iteration overheads are incurred.");
10192     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10193       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10194     else {
10195       LLVM_DEBUG(dbgs() << "\n");
10196       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10197     }
10198   }
10199 
10200   // Check the function attributes to see if implicit floats are allowed.
10201   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10202   // an integer loop and the vector instructions selected are purely integer
10203   // vector instructions?
10204   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10205     reportVectorizationFailure(
10206         "Can't vectorize when the NoImplicitFloat attribute is used",
10207         "loop not vectorized due to NoImplicitFloat attribute",
10208         "NoImplicitFloat", ORE, L);
10209     Hints.emitRemarkWithHints();
10210     return false;
10211   }
10212 
10213   // Check if the target supports potentially unsafe FP vectorization.
10214   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10215   // for the target we're vectorizing for, to make sure none of the
10216   // additional fp-math flags can help.
10217   if (Hints.isPotentiallyUnsafe() &&
10218       TTI->isFPVectorizationPotentiallyUnsafe()) {
10219     reportVectorizationFailure(
10220         "Potentially unsafe FP op prevents vectorization",
10221         "loop not vectorized due to unsafe FP support.",
10222         "UnsafeFP", ORE, L);
10223     Hints.emitRemarkWithHints();
10224     return false;
10225   }
10226 
10227   bool AllowOrderedReductions;
10228   // If the flag is set, use that instead and override the TTI behaviour.
10229   if (ForceOrderedReductions.getNumOccurrences() > 0)
10230     AllowOrderedReductions = ForceOrderedReductions;
10231   else
10232     AllowOrderedReductions = TTI->enableOrderedReductions();
10233   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10234     ORE->emit([&]() {
10235       auto *ExactFPMathInst = Requirements.getExactFPInst();
10236       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10237                                                  ExactFPMathInst->getDebugLoc(),
10238                                                  ExactFPMathInst->getParent())
10239              << "loop not vectorized: cannot prove it is safe to reorder "
10240                 "floating-point operations";
10241     });
10242     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10243                          "reorder floating-point operations\n");
10244     Hints.emitRemarkWithHints();
10245     return false;
10246   }
10247 
10248   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10249   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10250 
10251   // If an override option has been passed in for interleaved accesses, use it.
10252   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10253     UseInterleaved = EnableInterleavedMemAccesses;
10254 
10255   // Analyze interleaved memory accesses.
10256   if (UseInterleaved) {
10257     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10258   }
10259 
10260   // Use the cost model.
10261   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10262                                 F, &Hints, IAI);
10263   CM.collectValuesToIgnore();
10264   CM.collectElementTypesForWidening();
10265 
10266   // Use the planner for vectorization.
10267   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10268                                Requirements, ORE);
10269 
10270   // Get user vectorization factor and interleave count.
10271   ElementCount UserVF = Hints.getWidth();
10272   unsigned UserIC = Hints.getInterleave();
10273 
10274   // Plan how to best vectorize, return the best VF and its cost.
10275   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10276 
10277   VectorizationFactor VF = VectorizationFactor::Disabled();
10278   unsigned IC = 1;
10279 
10280   if (MaybeVF) {
10281     VF = *MaybeVF;
10282     // Select the interleave count.
10283     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10284   }
10285 
10286   // Identify the diagnostic messages that should be produced.
10287   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10288   bool VectorizeLoop = true, InterleaveLoop = true;
10289   if (VF.Width.isScalar()) {
10290     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10291     VecDiagMsg = std::make_pair(
10292         "VectorizationNotBeneficial",
10293         "the cost-model indicates that vectorization is not beneficial");
10294     VectorizeLoop = false;
10295   }
10296 
10297   if (!MaybeVF && UserIC > 1) {
10298     // Tell the user interleaving was avoided up-front, despite being explicitly
10299     // requested.
10300     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10301                          "interleaving should be avoided up front\n");
10302     IntDiagMsg = std::make_pair(
10303         "InterleavingAvoided",
10304         "Ignoring UserIC, because interleaving was avoided up front");
10305     InterleaveLoop = false;
10306   } else if (IC == 1 && UserIC <= 1) {
10307     // Tell the user interleaving is not beneficial.
10308     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10309     IntDiagMsg = std::make_pair(
10310         "InterleavingNotBeneficial",
10311         "the cost-model indicates that interleaving is not beneficial");
10312     InterleaveLoop = false;
10313     if (UserIC == 1) {
10314       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10315       IntDiagMsg.second +=
10316           " and is explicitly disabled or interleave count is set to 1";
10317     }
10318   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10320     LLVM_DEBUG(
10321         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10322     IntDiagMsg = std::make_pair(
10323         "InterleavingBeneficialButDisabled",
10324         "the cost-model indicates that interleaving is beneficial "
10325         "but is explicitly disabled or interleave count is set to 1");
10326     InterleaveLoop = false;
10327   }
10328 
10329   // Override IC if user provided an interleave count.
10330   IC = UserIC > 0 ? UserIC : IC;
10331 
10332   // Emit diagnostic messages, if any.
10333   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10334   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10336     ORE->emit([&]() {
10337       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10338                                       L->getStartLoc(), L->getHeader())
10339              << VecDiagMsg.second;
10340     });
10341     ORE->emit([&]() {
10342       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10343                                       L->getStartLoc(), L->getHeader())
10344              << IntDiagMsg.second;
10345     });
10346     return false;
10347   } else if (!VectorizeLoop && InterleaveLoop) {
10348     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10349     ORE->emit([&]() {
10350       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10351                                         L->getStartLoc(), L->getHeader())
10352              << VecDiagMsg.second;
10353     });
10354   } else if (VectorizeLoop && !InterleaveLoop) {
10355     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10356                       << ") in " << DebugLocStr << '\n');
10357     ORE->emit([&]() {
10358       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10359                                         L->getStartLoc(), L->getHeader())
10360              << IntDiagMsg.second;
10361     });
10362   } else if (VectorizeLoop && InterleaveLoop) {
10363     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10364                       << ") in " << DebugLocStr << '\n');
10365     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10366   }
10367 
10368   bool DisableRuntimeUnroll = false;
10369   MDNode *OrigLoopID = L->getLoopID();
10370   {
    // Optimistically generate runtime checks. Drop them if they turn out to not
    // be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10374     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10375                              F->getParent()->getDataLayout());
10376     if (!VF.Width.isScalar() || IC > 1)
10377       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10378     LVP.setBestPlan(VF.Width, IC);
10379 
10380     using namespace ore;
10381     if (!VectorizeLoop) {
10382       assert(IC > 1 && "interleave count should not be 1 or 0");
10383       // If we decided that it is not legal to vectorize the loop, then
10384       // interleave it.
10385       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10386                                  &CM, BFI, PSI, Checks);
10387       LVP.executePlan(Unroller, DT);
10388 
10389       ORE->emit([&]() {
10390         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10391                                   L->getHeader())
10392                << "interleaved loop (interleaved count: "
10393                << NV("InterleaveCount", IC) << ")";
10394       });
10395     } else {
10396       // If we decided that it is *legal* to vectorize the loop, then do it.
10397 
10398       // Consider vectorizing the epilogue too if it's profitable.
10399       VectorizationFactor EpilogueVF =
10400           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10401       if (EpilogueVF.Width.isVector()) {
10402 
10403         // The first pass vectorizes the main loop and creates a scalar epilogue
10404         // to be vectorized by executing the plan (potentially with a different
10405         // factor) again shortly afterwards.
10406         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10407                                           EpilogueVF.Width.getKnownMinValue(),
10408                                           1);
10409         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10410                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10411 
10412         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10413         LVP.executePlan(MainILV, DT);
10414         ++LoopsVectorized;
10415 
10416         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10417         formLCSSARecursively(*L, *DT, LI, SE);
10418 
10419         // Second pass vectorizes the epilogue and adjusts the control flow
10420         // edges from the first pass.
10421         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10422         EPI.MainLoopVF = EPI.EpilogueVF;
10423         EPI.MainLoopUF = EPI.EpilogueUF;
10424         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10425                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10426                                                  Checks);
10427         LVP.executePlan(EpilogILV, DT);
10428         ++LoopsEpilogueVectorized;
10429 
10430         if (!MainILV.areSafetyChecksAdded())
10431           DisableRuntimeUnroll = true;
10432       } else {
10433         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10434                                &LVL, &CM, BFI, PSI, Checks);
10435         LVP.executePlan(LB, DT);
10436         ++LoopsVectorized;
10437 
10438         // Add metadata to disable runtime unrolling a scalar loop when there
10439         // are no runtime checks about strides and memory. A scalar loop that is
10440         // rarely used is not worth unrolling.
10441         if (!LB.areSafetyChecksAdded())
10442           DisableRuntimeUnroll = true;
10443       }
10444       // Report the vectorization decision.
10445       ORE->emit([&]() {
10446         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10447                                   L->getHeader())
10448                << "vectorized loop (vectorization width: "
10449                << NV("VectorizationFactor", VF.Width)
10450                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10451       });
10452     }
10453 
10454     if (ORE->allowExtraAnalysis(LV_NAME))
10455       checkMixedPrecision(L, ORE);
10456   }
10457 
10458   Optional<MDNode *> RemainderLoopID =
10459       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10460                                       LLVMLoopVectorizeFollowupEpilogue});
10461   if (RemainderLoopID.hasValue()) {
10462     L->setLoopID(RemainderLoopID.getValue());
10463   } else {
10464     if (DisableRuntimeUnroll)
10465       AddRuntimeUnrollDisableMetaData(L);
10466 
10467     // Mark the loop as already vectorized to avoid vectorizing again.
10468     Hints.setAlreadyVectorized();
10469   }
10470 
10471   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10472   return true;
10473 }
10474 
10475 LoopVectorizeResult LoopVectorizePass::runImpl(
10476     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10477     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10478     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10479     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10480     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10481   SE = &SE_;
10482   LI = &LI_;
10483   TTI = &TTI_;
10484   DT = &DT_;
10485   BFI = &BFI_;
10486   TLI = TLI_;
10487   AA = &AA_;
10488   AC = &AC_;
10489   GetLAA = &GetLAA_;
10490   DB = &DB_;
10491   ORE = &ORE_;
10492   PSI = PSI_;
10493 
10494   // Don't attempt if
10495   // 1. the target claims to have no vector registers, and
10496   // 2. interleaving won't help ILP.
10497   //
10498   // The second condition is necessary because, even if the target has no
10499   // vector registers, loop vectorization may still enable scalar
10500   // interleaving.
10501   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10502       TTI->getMaxInterleaveFactor(1) < 2)
10503     return LoopVectorizeResult(false, false);
10504 
10505   bool Changed = false, CFGChanged = false;
10506 
10507   // The vectorizer requires loops to be in simplified form.
10508   // Since simplification may add new inner loops, it has to run before the
10509   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10511   // vectorized.
10512   for (auto &L : *LI)
10513     Changed |= CFGChanged |=
10514         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10515 
10516   // Build up a worklist of inner-loops to vectorize. This is necessary as
10517   // the act of vectorizing or partially unrolling a loop creates new loops
10518   // and can invalidate iterators across the loops.
10519   SmallVector<Loop *, 8> Worklist;
10520 
10521   for (Loop *L : *LI)
10522     collectSupportedLoops(*L, LI, ORE, Worklist);
10523 
10524   LoopsAnalyzed += Worklist.size();
10525 
10526   // Now walk the identified inner loops.
10527   while (!Worklist.empty()) {
10528     Loop *L = Worklist.pop_back_val();
10529 
10530     // For the inner loops we actually process, form LCSSA to simplify the
10531     // transform.
10532     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10533 
10534     Changed |= CFGChanged |= processLoop(L);
10535   }
10536 
10537   // Process each loop nest in the function.
10538   return LoopVectorizeResult(Changed, CFGChanged);
10539 }
10540 
10541 PreservedAnalyses LoopVectorizePass::run(Function &F,
10542                                          FunctionAnalysisManager &AM) {
10543     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10544     auto &LI = AM.getResult<LoopAnalysis>(F);
10545     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10546     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10547     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10548     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10549     auto &AA = AM.getResult<AAManager>(F);
10550     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10551     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10552     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10553 
10554     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10555     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10556         [&](Loop &L) -> const LoopAccessInfo & {
10557       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
10558                                         TLI, TTI, nullptr, nullptr};
10559       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10560     };
10561     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10562     ProfileSummaryInfo *PSI =
10563         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10564     LoopVectorizeResult Result =
10565         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10566     if (!Result.MadeAnyChange)
10567       return PreservedAnalyses::all();
10568     PreservedAnalyses PA;
10569 
10570     // We currently do not preserve loopinfo/dominator analyses with outer loop
10571     // vectorization. Until this is addressed, mark these analyses as preserved
10572     // only for non-VPlan-native path.
10573     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10574     if (!EnableVPlanNativePath) {
10575       PA.preserve<LoopAnalysis>();
10576       PA.preserve<DominatorTreeAnalysis>();
10577     }
10578     if (!Result.MadeCFGChange)
10579       PA.preserveSet<CFGAnalyses>();
10580     return PA;
10581 }
10582