1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
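// For illustration only (a simplified sketch; real output also contains
// runtime checks, a middle block and induction-variable updates), a loop like
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 1;
//
// is conceptually rewritten for a vectorization factor (VF) of 4 as
//
//   for (i = 0; i + 4 <= n; i += 4)
//     A[i..i+3] = B[i..i+3] + <1, 1, 1, 1>;   // one 'wide' iteration
//
// with any remaining iterations handled by a scalar epilogue loop.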
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallVector.h"
74 #include "llvm/ADT/Statistic.h"
75 #include "llvm/ADT/StringRef.h"
76 #include "llvm/ADT/Twine.h"
77 #include "llvm/ADT/iterator_range.h"
78 #include "llvm/Analysis/AssumptionCache.h"
79 #include "llvm/Analysis/BasicAliasAnalysis.h"
80 #include "llvm/Analysis/BlockFrequencyInfo.h"
81 #include "llvm/Analysis/CFG.h"
82 #include "llvm/Analysis/CodeMetrics.h"
83 #include "llvm/Analysis/DemandedBits.h"
84 #include "llvm/Analysis/GlobalsModRef.h"
85 #include "llvm/Analysis/LoopAccessAnalysis.h"
86 #include "llvm/Analysis/LoopAnalysisManager.h"
87 #include "llvm/Analysis/LoopInfo.h"
88 #include "llvm/Analysis/LoopIterator.h"
89 #include "llvm/Analysis/MemorySSA.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/Type.h"
120 #include "llvm/IR/Use.h"
121 #include "llvm/IR/User.h"
122 #include "llvm/IR/Value.h"
123 #include "llvm/IR/ValueHandle.h"
124 #include "llvm/IR/Verifier.h"
125 #include "llvm/InitializePasses.h"
126 #include "llvm/Pass.h"
127 #include "llvm/Support/Casting.h"
128 #include "llvm/Support/CommandLine.h"
129 #include "llvm/Support/Compiler.h"
130 #include "llvm/Support/Debug.h"
131 #include "llvm/Support/ErrorHandling.h"
132 #include "llvm/Support/InstructionCost.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 #ifndef NDEBUG
161 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
162 #endif
163 
164 /// @{
165 /// Metadata attribute names
166 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
167 const char LLVMLoopVectorizeFollowupVectorized[] =
168     "llvm.loop.vectorize.followup_vectorized";
169 const char LLVMLoopVectorizeFollowupEpilogue[] =
170     "llvm.loop.vectorize.followup_epilogue";
171 /// @}
172 
173 STATISTIC(LoopsVectorized, "Number of loops vectorized");
174 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
175 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
176 
177 static cl::opt<bool> EnableEpilogueVectorization(
178     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
179     cl::desc("Enable vectorization of epilogue loops."));
180 
181 static cl::opt<unsigned> EpilogueVectorizationForceVF(
182     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
183     cl::desc("When epilogue vectorization is enabled, and a value greater than "
184              "1 is specified, forces the given VF for all applicable epilogue "
185              "loops."));
186 
187 static cl::opt<unsigned> EpilogueVectorizationMinVF(
188     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
189     cl::desc("Only loops with vectorization factor equal to or larger than "
190              "the specified value are considered for epilogue vectorization."));
191 
192 /// Loops with a known constant trip count below this number are vectorized only
193 /// if no scalar iteration overheads are incurred.
194 static cl::opt<unsigned> TinyTripCountVectorThreshold(
195     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
196     cl::desc("Loops with a constant trip count that is smaller than this "
197              "value are vectorized only if no scalar iteration overheads "
198              "are incurred."));
199 
200 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
201     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
202     cl::desc("The maximum allowed number of runtime memory checks with a "
203              "vectorize(enable) pragma."));
204 
205 // Option prefer-predicate-over-epilogue indicates that an epilogue is
206 // undesired, that predication is preferred, and the enum below lists the
207 // available strategies. I.e., the vectorizer will try to fold the tail loop
208 // (epilogue) into the vector body and predicate the instructions
209 // accordingly. If tail-folding fails, these values select the fallback:
210 namespace PreferPredicateTy {
211   enum Option {
212     ScalarEpilogue = 0,
213     PredicateElseScalarEpilogue,
214     PredicateOrDontVectorize
215   };
216 } // namespace PreferPredicateTy
217 
218 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
219     "prefer-predicate-over-epilogue",
220     cl::init(PreferPredicateTy::ScalarEpilogue),
221     cl::Hidden,
222     cl::desc("Tail-folding and predication preferences over creating a scalar "
223              "epilogue loop."),
224     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
225                          "scalar-epilogue",
226                          "Don't tail-predicate loops, create scalar epilogue"),
227               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
228                          "predicate-else-scalar-epilogue",
229                          "prefer tail-folding, create scalar epilogue if tail "
230                          "folding fails."),
231               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
232                          "predicate-dont-vectorize",
233                          "prefers tail-folding, don't attempt vectorization if "
234                          "tail-folding fails.")));
235 
236 static cl::opt<bool> MaximizeBandwidth(
237     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
238     cl::desc("Maximize bandwidth when selecting vectorization factor which "
239              "will be determined by the smallest type in the loop."));
240 
241 static cl::opt<bool> EnableInterleavedMemAccesses(
242     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
243     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
244 
245 /// An interleave-group may need masking if it resides in a block that needs
246 /// predication, or in order to mask away gaps.
247 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
248     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
249     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
250 
251 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
252     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
253     cl::desc("We don't interleave loops with an estimated constant trip count "
254              "below this number"));
255 
256 static cl::opt<unsigned> ForceTargetNumScalarRegs(
257     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of scalar registers."));
259 
260 static cl::opt<unsigned> ForceTargetNumVectorRegs(
261     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's number of vector registers."));
263 
264 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
265     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
266     cl::desc("A flag that overrides the target's max interleave factor for "
267              "scalar loops."));
268 
269 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
270     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
271     cl::desc("A flag that overrides the target's max interleave factor for "
272              "vectorized loops."));
273 
274 static cl::opt<unsigned> ForceTargetInstructionCost(
275     "force-target-instruction-cost", cl::init(0), cl::Hidden,
276     cl::desc("A flag that overrides the target's expected cost for "
277              "an instruction to a single constant value. Mostly "
278              "useful for getting consistent testing."));
279 
280 static cl::opt<bool> ForceTargetSupportsScalableVectors(
281     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
282     cl::desc(
283         "Pretend that scalable vectors are supported, even if the target does "
284         "not support them. This flag should only be used for testing."));
285 
286 static cl::opt<unsigned> SmallLoopCost(
287     "small-loop-cost", cl::init(20), cl::Hidden,
288     cl::desc(
289         "The cost of a loop that is considered 'small' by the interleaver."));
290 
291 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
292     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
293     cl::desc("Enable the use of the block frequency analysis to access PGO "
294              "heuristics minimizing code growth in cold regions and being more "
295              "aggressive in hot regions."));
296 
297 // Runtime interleave loops for load/store throughput.
298 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
299     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
300     cl::desc(
301         "Enable runtime interleaving until load/store ports are saturated"));
302 
303 /// Interleave small loops with scalar reductions.
304 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
305     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
306     cl::desc("Enable interleaving for loops with small iteration counts that "
307              "contain scalar reductions to expose ILP."));
308 
309 /// The number of stores in a loop that are allowed to need predication.
310 static cl::opt<unsigned> NumberOfStoresToPredicate(
311     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
312     cl::desc("Max number of stores to be predicated behind an if."));
313 
314 static cl::opt<bool> EnableIndVarRegisterHeur(
315     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
316     cl::desc("Count the induction variable only once when interleaving"));
317 
318 static cl::opt<bool> EnableCondStoresVectorization(
319     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
320     cl::desc("Enable if-predication of stores during vectorization."));
321 
322 static cl::opt<unsigned> MaxNestedScalarReductionIC(
323     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
324     cl::desc("The maximum interleave count to use when interleaving a scalar "
325              "reduction in a nested loop."));
326 
327 static cl::opt<bool>
328     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
329                            cl::Hidden,
330                            cl::desc("Prefer in-loop vector reductions, "
331                                     "overriding the target's preference."));
332 
333 cl::opt<bool> EnableStrictReductions(
334     "enable-strict-reductions", cl::init(false), cl::Hidden,
335     cl::desc("Enable the vectorization of loops with in-order (strict) "
336              "FP reductions"));
337 
338 static cl::opt<bool> PreferPredicatedReductionSelect(
339     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
340     cl::desc(
341         "Prefer predicating a reduction operation over an after loop select."));
342 
343 cl::opt<bool> EnableVPlanNativePath(
344     "enable-vplan-native-path", cl::init(false), cl::Hidden,
345     cl::desc("Enable VPlan-native vectorization path with "
346              "support for outer loop vectorization."));
347 
348 // FIXME: Remove this switch once we have divergence analysis. Currently we
349 // assume divergent non-backedge branches when this switch is true.
350 cl::opt<bool> EnableVPlanPredication(
351     "enable-vplan-predication", cl::init(false), cl::Hidden,
352     cl::desc("Enable VPlan-native vectorization path predicator with "
353              "support for outer loop vectorization."));
354 
355 // This flag enables the stress testing of the VPlan H-CFG construction in the
356 // VPlan-native vectorization path. It must be used in conjunction with
357 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
358 // verification of the H-CFGs built.
359 static cl::opt<bool> VPlanBuildStressTest(
360     "vplan-build-stress-test", cl::init(false), cl::Hidden,
361     cl::desc(
362         "Build VPlan for every supported loop nest in the function and bail "
363         "out right after the build (stress test the VPlan H-CFG construction "
364         "in the VPlan-native vectorization path)."));
365 
366 cl::opt<bool> llvm::EnableLoopInterleaving(
367     "interleave-loops", cl::init(true), cl::Hidden,
368     cl::desc("Enable loop interleaving in Loop vectorization passes"));
369 cl::opt<bool> llvm::EnableLoopVectorization(
370     "vectorize-loops", cl::init(true), cl::Hidden,
371     cl::desc("Run the Loop vectorization passes"));
372 
373 cl::opt<bool> PrintVPlansInDotFormat(
374     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
375     cl::desc("Use dot format instead of plain text when dumping VPlans"));
376 
377 /// A helper function that returns the type of loaded or stored value.
378 static Type *getMemInstValueType(Value *I) {
379   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
380          "Expected Load or Store instruction");
381   if (auto *LI = dyn_cast<LoadInst>(I))
382     return LI->getType();
383   return cast<StoreInst>(I)->getValueOperand()->getType();
384 }
385 
386 /// A helper function that returns true if the given type is irregular. The
387 /// type is irregular if its allocated size doesn't equal the store size of an
388 /// element of the corresponding vector type.
389 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
390   // Determine if an array of N elements of type Ty is "bitcast compatible"
391   // with a <N x Ty> vector.
392   // This is only true if there is no padding between the array elements.
393   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
394 }
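// For example, on a typical x86-64 data layout x86_fp80 has a type size of 80
// bits but an allocated size of 128 bits, so an array of x86_fp80 has padding
// between elements and cannot be bitcast to a vector of x86_fp80, whereas i32
// (32 bits in both senses) is regular. (Illustrative; the exact sizes come
// from the module's DataLayout.)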
395 
396 /// A helper function that returns the reciprocal of the block probability of
397 /// predicated blocks. If we return X, we are assuming the predicated block
398 /// will execute once for every X iterations of the loop header.
399 ///
400 /// TODO: We should use actual block probability here, if available. Currently,
401 ///       we always assume predicated blocks have a 50% chance of executing.
402 static unsigned getReciprocalPredBlockProb() { return 2; }
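// For example, under the 50% assumption above, an instruction with cost 4 in a
// predicated block contributes roughly 4 / 2 = 2 to the per-iteration cost
// (callers of this helper divide the predicated block's cost by the returned
// value).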
403 
404 /// A helper function that returns an integer or floating-point constant with
405 /// value C.
406 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
407   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
408                            : ConstantFP::get(Ty, C);
409 }
410 
411 /// Returns "best known" trip count for the specified loop \p L as defined by
412 /// the following procedure:
413 ///   1) Returns exact trip count if it is known.
414 ///   2) Returns expected trip count according to profile data if any.
415 ///   3) Returns upper bound estimate if it is known.
416 ///   4) Returns None if all of the above failed.
417 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
418   // Check if exact trip count is known.
419   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
420     return ExpectedTC;
421 
422   // Check if there is an expected trip count available from profile data.
423   if (LoopVectorizeWithBlockFrequency)
424     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
425       return EstimatedTC;
426 
427   // Check if upper bound estimate is known.
428   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
429     return ExpectedTC;
430 
431   return None;
432 }
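// For example (illustrative numbers): if the exact trip count is not a
// compile-time constant, profile data estimates roughly 1000 iterations, and
// SCEV can only prove an upper bound of 4096, step 2) applies and 1000 is
// returned.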
433 
434 // Forward declare GeneratedRTChecks.
435 class GeneratedRTChecks;
436 
437 namespace llvm {
438 
439 /// InnerLoopVectorizer vectorizes loops which contain only one basic
440 /// block to a specified vectorization factor (VF).
441 /// This class performs the widening of scalars into vectors, or multiple
442 /// scalars. This class also implements the following features:
443 /// * It inserts an epilogue loop for handling loops that don't have iteration
444 ///   counts that are known to be a multiple of the vectorization factor.
445 /// * It handles the code generation for reduction variables.
446 /// * Scalarization (implementation using scalars) of un-vectorizable
447 ///   instructions.
448 /// InnerLoopVectorizer does not perform any vectorization-legality
449 /// checks, and relies on the caller to check for the different legality
450 /// aspects. The InnerLoopVectorizer relies on the LoopVectorizationLegality
451 /// class to provide information about the induction and reduction variables
452 /// that were found during legality analysis.
453 class InnerLoopVectorizer {
454 public:
455   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
456                       LoopInfo *LI, DominatorTree *DT,
457                       const TargetLibraryInfo *TLI,
458                       const TargetTransformInfo *TTI, AssumptionCache *AC,
459                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
460                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
461                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
462                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
463       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
464         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
465         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
466         PSI(PSI), RTChecks(RTChecks) {
467     // Query this against the original loop and save it here because the profile
468     // of the original loop header may change as the transformation happens.
469     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
470         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
471   }
472 
473   virtual ~InnerLoopVectorizer() = default;
474 
475   /// Create a new empty loop that will contain vectorized instructions later
476   /// on, while the old loop will be used as the scalar remainder. Control flow
477   /// is generated around the vectorized (and scalar epilogue) loops consisting
478   /// of various checks and bypasses. Return the pre-header block of the new
479   /// loop.
480   /// In the case of epilogue vectorization, this function is overridden to
481   /// handle the more complex control flow around the loops.
482   virtual BasicBlock *createVectorizedLoopSkeleton();
483 
484   /// Widen a single instruction within the innermost loop.
485   void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
486                         VPTransformState &State);
487 
488   /// Widen a single call instruction within the innermost loop.
489   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
490                             VPTransformState &State);
491 
492   /// Widen a single select instruction within the innermost loop.
493   void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
494                               bool InvariantCond, VPTransformState &State);
495 
496   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
497   void fixVectorizedLoop(VPTransformState &State);
498 
499   // Return true if any runtime check is added.
500   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
501 
502   /// A type for vectorized values in the new loop. Each value from the
503   /// original loop, when vectorized, is represented by UF vector values in the
504   /// new unrolled loop, where UF is the unroll factor.
505   using VectorParts = SmallVector<Value *, 2>;
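  // For example (illustrative): with UF = 2 and VF = 4, a widened i32 value
  // from the original loop is held as two <4 x i32> IR values, one per
  // unrolled part.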
506 
507   /// Vectorize a single GetElementPtrInst based on information gathered and
508   /// decisions taken during planning.
509   void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
510                 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
511                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
512 
513   /// Vectorize a single PHINode in a block. This method handles the induction
514   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
515   /// arbitrary length vectors.
516   void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
517                            VPValue *StartV, VPValue *Def,
518                            VPTransformState &State);
519 
520   /// A helper function to scalarize a single Instruction in the innermost loop.
521   /// Generates a sequence of scalar instances for each lane between \p MinLane
522   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
523   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
524   /// Instr's operands.
525   void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
526                             const VPIteration &Instance, bool IfPredicateInstr,
527                             VPTransformState &State);
528 
529   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
530   /// is provided, the integer induction variable will first be truncated to
531   /// the corresponding type.
532   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
533                              VPValue *Def, VPValue *CastDef,
534                              VPTransformState &State);
535 
536   /// Construct the vector value of a scalarized value \p V one lane at a time.
537   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
538                                  VPTransformState &State);
539 
540   /// Try to vectorize interleaved access group \p Group with the base address
541   /// given in \p Addr, optionally masking the vector operations if \p
542   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
543   /// values in the vectorized loop.
544   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
545                                 ArrayRef<VPValue *> VPDefs,
546                                 VPTransformState &State, VPValue *Addr,
547                                 ArrayRef<VPValue *> StoredValues,
548                                 VPValue *BlockInMask = nullptr);
549 
550   /// Vectorize Load and Store instructions with the base address given in \p
551   /// Addr, optionally masking the vector operations if \p BlockInMask is
552   /// non-null. Use \p State to translate given VPValues to IR values in the
553   /// vectorized loop.
554   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
555                                   VPValue *Def, VPValue *Addr,
556                                   VPValue *StoredValue, VPValue *BlockInMask);
557 
558   /// Set the debug location in the builder using the debug location in
559   /// the instruction.
560   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
561 
562   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
563   void fixNonInductionPHIs(VPTransformState &State);
564 
565   /// Create a broadcast instruction. This method generates a broadcast
566   /// instruction (shuffle) for loop invariant values and for the induction
567   /// value. If this is the induction variable then we extend it to N, N+1, ...
568   /// which is needed because each iteration in the loop corresponds to a SIMD
569   /// element.
570   virtual Value *getBroadcastInstrs(Value *V);
571 
572 protected:
573   friend class LoopVectorizationPlanner;
574 
575   /// A small list of PHINodes.
576   using PhiVector = SmallVector<PHINode *, 4>;
577 
578   /// A type for scalarized values in the new loop. Each value from the
579   /// original loop, when scalarized, is represented by UF x VF scalar values
580   /// in the new unrolled loop, where UF is the unroll factor and VF is the
581   /// vectorization factor.
582   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
583 
584   /// Set up the values of the IVs correctly when exiting the vector loop.
585   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
586                     Value *CountRoundDown, Value *EndValue,
587                     BasicBlock *MiddleBlock);
588 
589   /// Create a new induction variable inside L.
590   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
591                                    Value *Step, Instruction *DL);
592 
593   /// Handle all cross-iteration phis in the header.
594   void fixCrossIterationPHIs(VPTransformState &State);
595 
596   /// Fix a first-order recurrence. This is the second phase of vectorizing
597   /// this phi node.
598   void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);
599 
600   /// Fix a reduction cross-iteration phi. This is the second phase of
601   /// vectorizing this phi node.
602   void fixReduction(PHINode *Phi, VPTransformState &State);
603 
604   /// Clear NSW/NUW flags from reduction instructions if necessary.
605   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
606                                VPTransformState &State);
607 
608   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
609   /// means we need to add the appropriate incoming value from the middle
610   /// block as exiting edges from the scalar epilogue loop (if present) are
611   /// already in place, and we exit the vector loop exclusively to the middle
612   /// block.
613   void fixLCSSAPHIs(VPTransformState &State);
614 
615   /// Iteratively sink the scalarized operands of a predicated instruction into
616   /// the block that was created for it.
617   void sinkScalarOperands(Instruction *PredInst);
618 
619   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
620   /// represented as.
621   void truncateToMinimalBitwidths(VPTransformState &State);
622 
623   /// This function adds
624   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
625   /// to each vector element of Val. The sequence starts at StartIdx.
626   /// \p Opcode is relevant for FP induction variable.
627   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
628                                Instruction::BinaryOps Opcode =
629                                Instruction::BinaryOpsEnd);
630 
631   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
632   /// variable on which to base the steps, \p Step is the size of the step, and
633   /// \p EntryVal is the value from the original loop that maps to the steps.
634   /// Note that \p EntryVal doesn't have to be an induction variable - it
635   /// can also be a truncate instruction.
636   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
637                         const InductionDescriptor &ID, VPValue *Def,
638                         VPValue *CastDef, VPTransformState &State);
639 
640   /// Create a vector induction phi node based on an existing scalar one. \p
641   /// EntryVal is the value from the original loop that maps to the vector phi
642   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
643   /// truncate instruction, instead of widening the original IV, we widen a
644   /// version of the IV truncated to \p EntryVal's type.
645   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
646                                        Value *Step, Value *Start,
647                                        Instruction *EntryVal, VPValue *Def,
648                                        VPValue *CastDef,
649                                        VPTransformState &State);
650 
651   /// Returns true if an instruction \p I should be scalarized instead of
652   /// vectorized for the chosen vectorization factor.
653   bool shouldScalarizeInstruction(Instruction *I) const;
654 
655   /// Returns true if we should generate a scalar version of \p IV.
656   bool needsScalarInduction(Instruction *IV) const;
657 
658   /// If there is a cast involved in the induction variable \p ID, which should
659   /// be ignored in the vectorized loop body, this function records the
660   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
661   /// cast. We had already proved that the casted Phi is equal to the uncasted
662   /// Phi in the vectorized loop (under a runtime guard), and therefore
663   /// there is no need to vectorize the cast - the same value can be used in the
664   /// vector loop for both the Phi and the cast.
665   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
666   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
667   ///
668   /// \p EntryVal is the value from the original loop that maps to the vector
669   /// phi node and is used to distinguish what is the IV currently being
670   /// processed - original one (if \p EntryVal is a phi corresponding to the
671   /// original IV) or the "newly-created" one based on the proof mentioned above
672   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
673   /// latter case \p EntryVal is a TruncInst and we must not record anything for
674   /// that IV, but it's error-prone to expect callers of this routine to care
675   /// about that, hence this explicit parameter.
676   void recordVectorLoopValueForInductionCast(
677       const InductionDescriptor &ID, const Instruction *EntryVal,
678       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
679       unsigned Part, unsigned Lane = UINT_MAX);
680 
681   /// Generate a shuffle sequence that will reverse the vector Vec.
682   virtual Value *reverseVector(Value *Vec);
683 
684   /// Returns (and creates if needed) the original loop trip count.
685   Value *getOrCreateTripCount(Loop *NewLoop);
686 
687   /// Returns (and creates if needed) the trip count of the widened loop.
688   Value *getOrCreateVectorTripCount(Loop *NewLoop);
689 
690   /// Returns a bitcasted value to the requested vector type.
691   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
692   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
693                                 const DataLayout &DL);
694 
695   /// Emit a bypass check to see if the vector trip count is zero, including if
696   /// it overflows.
697   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
698 
699   /// Emit a bypass check to see if all of the SCEV assumptions we've
700   /// had to make are correct. Returns the block containing the checks or
701   /// nullptr if no checks have been added.
702   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
703 
704   /// Emit bypass checks to check any memory assumptions we may have made.
705   /// Returns the block containing the checks or nullptr if no checks have been
706   /// added.
707   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
708 
709   /// Compute the transformed value of Index at offset StartValue using step
710   /// StepValue.
711   /// For integer induction, returns StartValue + Index * StepValue.
712   /// For pointer induction, returns StartValue[Index * StepValue].
713   /// FIXME: The newly created binary instructions should contain nsw/nuw
714   /// flags, which can be found from the original scalar operations.
715   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
716                               const DataLayout &DL,
717                               const InductionDescriptor &ID) const;
718 
719   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
720   /// vector loop preheader, middle block and scalar preheader. Also
721   /// allocate a loop object for the new vector loop and return it.
722   Loop *createVectorLoopSkeleton(StringRef Prefix);
723 
724   /// Create new phi nodes for the induction variables to resume iteration count
725   /// in the scalar epilogue, from where the vectorized loop left off (given by
726   /// \p VectorTripCount).
727   /// In cases where the loop skeleton is more complicated (eg. epilogue
728   /// vectorization) and the resume values can come from an additional bypass
729   /// block, the \p AdditionalBypass pair provides information about the bypass
730   /// block and the end value on the edge from bypass to this loop.
731   void createInductionResumeValues(
732       Loop *L, Value *VectorTripCount,
733       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
734 
735   /// Complete the loop skeleton by adding debug MDs, creating appropriate
736   /// conditional branches in the middle block, preparing the builder and
737   /// running the verifier. Take in the vector loop \p L as argument, and return
738   /// the preheader of the completed vector loop.
739   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
740 
741   /// Add additional metadata to \p To that was not present on \p Orig.
742   ///
743   /// Currently this is used to add the noalias annotations based on the
744   /// inserted memchecks.  Use this for instructions that are *cloned* into the
745   /// vector loop.
746   void addNewMetadata(Instruction *To, const Instruction *Orig);
747 
748   /// Add metadata from one instruction to another.
749   ///
750   /// This includes both the original MDs from \p From and additional ones (\see
751   /// addNewMetadata).  Use this for *newly created* instructions in the vector
752   /// loop.
753   void addMetadata(Instruction *To, Instruction *From);
754 
755   /// Similar to the previous function but it adds the metadata to a
756   /// vector of instructions.
757   void addMetadata(ArrayRef<Value *> To, Instruction *From);
758 
759   /// Allow subclasses to override and print debug traces before/after vplan
760   /// execution, when trace information is requested.
761   virtual void printDebugTracesAtStart() {}
762   virtual void printDebugTracesAtEnd() {}
763 
764   /// The original loop.
765   Loop *OrigLoop;
766 
767   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
768   /// dynamic knowledge to simplify SCEV expressions and converts them to a
769   /// more usable form.
770   PredicatedScalarEvolution &PSE;
771 
772   /// Loop Info.
773   LoopInfo *LI;
774 
775   /// Dominator Tree.
776   DominatorTree *DT;
777 
778   /// Alias Analysis.
779   AAResults *AA;
780 
781   /// Target Library Info.
782   const TargetLibraryInfo *TLI;
783 
784   /// Target Transform Info.
785   const TargetTransformInfo *TTI;
786 
787   /// Assumption Cache.
788   AssumptionCache *AC;
789 
790   /// Interface to emit optimization remarks.
791   OptimizationRemarkEmitter *ORE;
792 
793   /// LoopVersioning.  It's only set up (non-null) if memchecks were
794   /// used.
795   ///
796   /// This is currently only used to add no-alias metadata based on the
797   /// memchecks.  The actual versioning is performed manually.
798   std::unique_ptr<LoopVersioning> LVer;
799 
800   /// The vectorization SIMD factor to use. Each vector will have this many
801   /// vector elements.
802   ElementCount VF;
803 
804   /// The vectorization unroll factor to use. Each scalar is vectorized to this
805   /// many different vector instructions.
806   unsigned UF;
807 
808   /// The builder that we use.
809   IRBuilder<> Builder;
810 
811   // --- Vectorization state ---
812 
813   /// The vector-loop preheader.
814   BasicBlock *LoopVectorPreHeader;
815 
816   /// The scalar-loop preheader.
817   BasicBlock *LoopScalarPreHeader;
818 
819   /// Middle Block between the vector and the scalar.
820   BasicBlock *LoopMiddleBlock;
821 
822   /// The (unique) ExitBlock of the scalar loop.  Note that
823   /// there can be multiple exiting edges reaching this block.
824   BasicBlock *LoopExitBlock;
825 
826   /// The vector loop body.
827   BasicBlock *LoopVectorBody;
828 
829   /// The scalar loop body.
830   BasicBlock *LoopScalarBody;
831 
832   /// A list of all bypass blocks. The first block is the entry of the loop.
833   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
834 
835   /// The new Induction variable which was added to the new block.
836   PHINode *Induction = nullptr;
837 
838   /// The induction variable of the old basic block.
839   PHINode *OldInduction = nullptr;
840 
841   /// Store instructions that were predicated.
842   SmallVector<Instruction *, 4> PredicatedInstructions;
843 
844   /// Trip count of the original loop.
845   Value *TripCount = nullptr;
846 
847   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
848   Value *VectorTripCount = nullptr;
849 
850   /// The legality analysis.
851   LoopVectorizationLegality *Legal;
852 
853   /// The profitability analysis.
854   LoopVectorizationCostModel *Cost;
855 
856   // Record whether runtime checks are added.
857   bool AddedSafetyChecks = false;
858 
859   // Holds the end values for each induction variable. We save the end values
860   // so we can later fix-up the external users of the induction variables.
861   DenseMap<PHINode *, Value *> IVEndValues;
862 
863   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
864   // fixed up at the end of vector code generation.
865   SmallVector<PHINode *, 8> OrigPHIsToFix;
866 
867   /// BFI and PSI are used to check for profile guided size optimizations.
868   BlockFrequencyInfo *BFI;
869   ProfileSummaryInfo *PSI;
870 
871   // Whether this loop should be optimized for size based on profile guided size
872   // optimizations.
873   bool OptForSizeBasedOnProfile;
874 
875   /// Structure to hold information about generated runtime checks, responsible
876   /// for cleaning up the checks if vectorization turns out to be unprofitable.
877   GeneratedRTChecks &RTChecks;
878 };
879 
880 class InnerLoopUnroller : public InnerLoopVectorizer {
881 public:
882   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
883                     LoopInfo *LI, DominatorTree *DT,
884                     const TargetLibraryInfo *TLI,
885                     const TargetTransformInfo *TTI, AssumptionCache *AC,
886                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
887                     LoopVectorizationLegality *LVL,
888                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
889                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
890       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
891                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
892                             BFI, PSI, Check) {}
893 
894 private:
895   Value *getBroadcastInstrs(Value *V) override;
896   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
897                        Instruction::BinaryOps Opcode =
898                        Instruction::BinaryOpsEnd) override;
899   Value *reverseVector(Value *Vec) override;
900 };
901 
902 /// Encapsulate information regarding vectorization of a loop and its epilogue.
903 /// This information is meant to be updated and used across two stages of
904 /// epilogue vectorization.
905 struct EpilogueLoopVectorizationInfo {
906   ElementCount MainLoopVF = ElementCount::getFixed(0);
907   unsigned MainLoopUF = 0;
908   ElementCount EpilogueVF = ElementCount::getFixed(0);
909   unsigned EpilogueUF = 0;
910   BasicBlock *MainLoopIterationCountCheck = nullptr;
911   BasicBlock *EpilogueIterationCountCheck = nullptr;
912   BasicBlock *SCEVSafetyCheck = nullptr;
913   BasicBlock *MemSafetyCheck = nullptr;
914   Value *TripCount = nullptr;
915   Value *VectorTripCount = nullptr;
916 
917   EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
918                                 unsigned EUF)
919       : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
920         EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
921     assert(EUF == 1 &&
922            "A high UF for the epilogue loop is likely not beneficial.");
923   }
924 };
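// For example (an illustrative choice of factors, not a recommendation), a
// plan that runs the main loop at VF = 16 with UF = 2 and the vectorized
// epilogue at VF = 8 would be described as:
//
//   EpilogueLoopVectorizationInfo EPI(/*MVF=*/16, /*MUF=*/2, /*EVF=*/8,
//                                     /*EUF=*/1);
//
// Note that EUF must be 1, as asserted in the constructor above.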
925 
926 /// An extension of the inner loop vectorizer that creates a skeleton for a
927 /// vectorized loop that has its epilogue (residual) also vectorized.
928 /// The idea is to run the vplan on a given loop twice, firstly to set up the
929 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
930 /// from the first step and vectorize the epilogue.  This is achieved by
931 /// deriving two concrete strategy classes from this base class and invoking
932 /// them in succession from the loop vectorizer planner.
933 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
934 public:
935   InnerLoopAndEpilogueVectorizer(
936       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
937       DominatorTree *DT, const TargetLibraryInfo *TLI,
938       const TargetTransformInfo *TTI, AssumptionCache *AC,
939       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
940       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
941       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
942       GeneratedRTChecks &Checks)
943       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
944                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
945                             Checks),
946         EPI(EPI) {}
947 
948   // Override this function to handle the more complex control flow around the
949   // three loops.
950   BasicBlock *createVectorizedLoopSkeleton() final override {
951     return createEpilogueVectorizedLoopSkeleton();
952   }
953 
954   /// The interface for creating a vectorized skeleton using one of two
955   /// different strategies, each corresponding to one execution of the vplan
956   /// as described above.
957   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
958 
959   /// Holds and updates state information required to vectorize the main loop
960   /// and its epilogue in two separate passes. This setup helps us avoid
961   /// regenerating and recomputing runtime safety checks. It also helps us to
962   /// shorten the iteration-count-check path length for the cases where the
963   /// iteration count of the loop is so small that the main vector loop is
964   /// completely skipped.
965   EpilogueLoopVectorizationInfo &EPI;
966 };
967 
968 /// A specialized derived class of inner loop vectorizer that performs
969 /// vectorization of *main* loops in the process of vectorizing loops and their
970 /// epilogues.
971 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
972 public:
973   EpilogueVectorizerMainLoop(
974       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
975       DominatorTree *DT, const TargetLibraryInfo *TLI,
976       const TargetTransformInfo *TTI, AssumptionCache *AC,
977       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
978       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
979       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
980       GeneratedRTChecks &Check)
981       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
982                                        EPI, LVL, CM, BFI, PSI, Check) {}
983   /// Implements the interface for creating a vectorized skeleton using the
984   /// *main loop* strategy (ie the first pass of vplan execution).
985   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
986 
987 protected:
988   /// Emits an iteration count bypass check once for the main loop (when \p
989   /// ForEpilogue is false) and once for the epilogue loop (when \p
990   /// ForEpilogue is true).
991   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
992                                              bool ForEpilogue);
993   void printDebugTracesAtStart() override;
994   void printDebugTracesAtEnd() override;
995 };
996 
997 // A specialized derived class of inner loop vectorizer that performs
998 // vectorization of *epilogue* loops in the process of vectorizing loops and
999 // their epilogues.
1000 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
1001 public:
1002   EpilogueVectorizerEpilogueLoop(
1003       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
1004       DominatorTree *DT, const TargetLibraryInfo *TLI,
1005       const TargetTransformInfo *TTI, AssumptionCache *AC,
1006       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
1007       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
1008       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
1009       GeneratedRTChecks &Checks)
1010       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
1011                                        EPI, LVL, CM, BFI, PSI, Checks) {}
1012   /// Implements the interface for creating a vectorized skeleton using the
1013   /// *epilogue loop* strategy (ie the second pass of vplan execution).
1014   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
1015 
1016 protected:
1017   /// Emits an iteration count bypass check after the main vector loop has
1018   /// finished to see if there are any iterations left to execute by either
1019   /// the vector epilogue or the scalar epilogue.
1020   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1021                                                       BasicBlock *Bypass,
1022                                                       BasicBlock *Insert);
1023   void printDebugTracesAtStart() override;
1024   void printDebugTracesAtEnd() override;
1025 };
1026 } // end namespace llvm
1027 
1028 /// Look for a meaningful debug location on the instruction or its
1029 /// operands.
1030 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1031   if (!I)
1032     return I;
1033 
1034   DebugLoc Empty;
1035   if (I->getDebugLoc() != Empty)
1036     return I;
1037 
1038   for (Use &Op : I->operands()) {
1039     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1040       if (OpInst->getDebugLoc() != Empty)
1041         return OpInst;
1042   }
1043 
1044   return I;
1045 }
1046 
1047 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
1048   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
1049     const DILocation *DIL = Inst->getDebugLoc();
1050     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1051         !isa<DbgInfoIntrinsic>(Inst)) {
1052       assert(!VF.isScalable() && "scalable vectors not yet supported.");
1053       auto NewDIL =
1054           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1055       if (NewDIL)
1056         B.SetCurrentDebugLocation(NewDIL.getValue());
1057       else
1058         LLVM_DEBUG(dbgs()
1059                    << "Failed to create new discriminator: "
1060                    << DIL->getFilename() << " Line: " << DIL->getLine());
1061     }
1062     else
1063       B.SetCurrentDebugLocation(DIL);
1064   } else
1065     B.SetCurrentDebugLocation(DebugLoc());
1066 }
1067 
1068 /// Write a record \p DebugMsg about vectorization failure to the debug
1069 /// output stream. If \p I is passed, it is an instruction that prevents
1070 /// vectorization.
1071 #ifndef NDEBUG
1072 static void debugVectorizationFailure(const StringRef DebugMsg,
1073     Instruction *I) {
1074   dbgs() << "LV: Not vectorizing: " << DebugMsg;
1075   if (I != nullptr)
1076     dbgs() << " " << *I;
1077   else
1078     dbgs() << '.';
1079   dbgs() << '\n';
1080 }
1081 #endif
1082 
1083 /// Create an analysis remark that explains why vectorization failed
1084 ///
1085 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1086 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1087 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1088 /// the location of the remark.  \return the remark object that can be
1089 /// streamed to.
1090 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1091     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1092   Value *CodeRegion = TheLoop->getHeader();
1093   DebugLoc DL = TheLoop->getStartLoc();
1094 
1095   if (I) {
1096     CodeRegion = I->getParent();
1097     // If there is no debug location attached to the instruction, fall back to
1098     // using the loop's.
1099     if (I->getDebugLoc())
1100       DL = I->getDebugLoc();
1101   }
1102 
1103   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
1104   R << "loop not vectorized: ";
1105   return R;
1106 }
1107 
1108 /// Return a value for Step multiplied by VF.
1109 static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
1110   assert(isa<ConstantInt>(Step) && "Expected an integer step");
1111   Constant *StepVal = ConstantInt::get(
1112       Step->getType(),
1113       cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
1114   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1115 }
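// For example (illustrative): with Step = 2, a fixed VF of 4 yields the
// constant 8, while a scalable VF of <vscale x 4> yields a runtime value
// equivalent to vscale * 8.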
1116 
1117 namespace llvm {
1118 
1119 /// Return the runtime value for VF.
1120 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1121   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1122   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1123 }
1124 
1125 void reportVectorizationFailure(const StringRef DebugMsg,
1126     const StringRef OREMsg, const StringRef ORETag,
1127     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
1128   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
1129   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1130   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
1131                 ORETag, TheLoop, I) << OREMsg);
1132 }
1133 
1134 } // end namespace llvm
1135 
1136 #ifndef NDEBUG
1137 /// \return string containing a file name and a line # for the given loop.
1138 static std::string getDebugLocString(const Loop *L) {
1139   std::string Result;
1140   if (L) {
1141     raw_string_ostream OS(Result);
1142     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1143       LoopDbgLoc.print(OS);
1144     else
1145       // Just print the module name.
1146       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1147     OS.flush();
1148   }
1149   return Result;
1150 }
1151 #endif
1152 
1153 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1154                                          const Instruction *Orig) {
1155   // If the loop was versioned with memchecks, add the corresponding no-alias
1156   // metadata.
1157   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1158     LVer->annotateInstWithNoAlias(To, Orig);
1159 }
1160 
1161 void InnerLoopVectorizer::addMetadata(Instruction *To,
1162                                       Instruction *From) {
1163   propagateMetadata(To, From);
1164   addNewMetadata(To, From);
1165 }
1166 
1167 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1168                                       Instruction *From) {
1169   for (Value *V : To) {
1170     if (Instruction *I = dyn_cast<Instruction>(V))
1171       addMetadata(I, From);
1172   }
1173 }
1174 
1175 namespace llvm {
1176 
1177 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1178 // lowered.
1179 enum ScalarEpilogueLowering {
1180 
1181   // The default: allowing scalar epilogues.
1182   CM_ScalarEpilogueAllowed,
1183 
1184   // Vectorization with OptForSize: don't allow epilogues.
1185   CM_ScalarEpilogueNotAllowedOptSize,
1186 
1187   // A special case of vectorization with OptForSize: loops with a very small
1188   // trip count are considered for vectorization under OptForSize, thereby
1189   // making sure the cost of their loop body is dominant, free of runtime
1190   // guards and scalar iteration overheads.
1191   CM_ScalarEpilogueNotAllowedLowTripLoop,
1192 
1193   // Loop hint predicate indicating an epilogue is undesired.
1194   CM_ScalarEpilogueNotNeededUsePredicate,
1195 
1196   // Directive indicating we must either tail fold or not vectorize
1197   CM_ScalarEpilogueNotAllowedUsePredicate
1198 };
1199 
1200 /// LoopVectorizationCostModel - estimates the expected speedups due to
1201 /// vectorization.
1202 /// In many cases vectorization is not profitable. This can happen because of
1203 /// a number of reasons. In this class we mainly attempt to predict the
1204 /// expected speedup/slowdowns due to the supported instruction set. We use the
1205 /// TargetTransformInfo to query the different backends for the cost of
1206 /// different operations.
1207 class LoopVectorizationCostModel {
1208 public:
1209   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1210                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1211                              LoopVectorizationLegality *Legal,
1212                              const TargetTransformInfo &TTI,
1213                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1214                              AssumptionCache *AC,
1215                              OptimizationRemarkEmitter *ORE, const Function *F,
1216                              const LoopVectorizeHints *Hints,
1217                              InterleavedAccessInfo &IAI)
1218       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1219         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1220         Hints(Hints), InterleaveInfo(IAI) {}
1221 
1222   /// \return An upper bound for the vectorization factor, or None if
1223   /// vectorization and interleaving should be avoided up front.
1224   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1225 
1226   /// \return True if runtime checks are required for vectorization, and false
1227   /// otherwise.
1228   bool runtimeChecksRequired();
1229 
1230   /// \return The most profitable vectorization factor and the cost of that VF.
1231   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1232   /// then this vectorization factor will be selected if vectorization is
1233   /// possible.
1234   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1235   VectorizationFactor
1236   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1237                                     const LoopVectorizationPlanner &LVP);
1238 
1239   /// Setup cost-based decisions for user vectorization factor.
1240   void selectUserVectorizationFactor(ElementCount UserVF) {
1241     collectUniformsAndScalars(UserVF);
1242     collectInstsToScalarize(UserVF);
1243   }
1244 
1245   /// \return The size (in bits) of the smallest and widest types in the code
1246   /// that needs to be vectorized. We ignore values that remain scalar such as
1247   /// 64 bit loop indices.
1248   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1249 
1250   /// \return The desired interleave count.
1251   /// If interleave count has been specified by metadata it will be returned.
1252   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1253   /// are the selected vectorization factor and the cost of the selected VF.
1254   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1255 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1263   void setCostBasedWideningDecision(ElementCount VF);
1264 
1265   /// A struct that represents some properties of the register usage
1266   /// of a loop.
1267   struct RegisterUsage {
1268     /// Holds the number of loop invariant values that are used in the loop.
1269     /// The key is ClassID of target-provided register class.
1270     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1271     /// Holds the maximum number of concurrent live intervals in the loop.
1272     /// The key is ClassID of target-provided register class.
1273     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1274   };
1275 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1278   SmallVector<RegisterUsage, 8>
1279   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1280 
1281   /// Collect values we want to ignore in the cost model.
1282   void collectValuesToIgnore();
1283 
1284   /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1286   void collectInLoopReductions();
1287 
1288   /// \returns The smallest bitwidth each instruction can be represented with.
1289   /// The vector equivalents of these instructions should be truncated to this
1290   /// type.
1291   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1292     return MinBWs;
1293   }
1294 
1295   /// \returns True if it is more profitable to scalarize instruction \p I for
1296   /// vectorization factor \p VF.
1297   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1298     assert(VF.isVector() &&
1299            "Profitable to scalarize relevant only for VF > 1.");
1300 
1301     // Cost model is not run in the VPlan-native path - return conservative
1302     // result until this changes.
1303     if (EnableVPlanNativePath)
1304       return false;
1305 
1306     auto Scalars = InstsToScalarize.find(VF);
1307     assert(Scalars != InstsToScalarize.end() &&
1308            "VF not yet analyzed for scalarization profitability");
1309     return Scalars->second.find(I) != Scalars->second.end();
1310   }
1311 
1312   /// Returns true if \p I is known to be uniform after vectorization.
1313   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1314     if (VF.isScalar())
1315       return true;
1316 
1317     // Cost model is not run in the VPlan-native path - return conservative
1318     // result until this changes.
1319     if (EnableVPlanNativePath)
1320       return false;
1321 
1322     auto UniformsPerVF = Uniforms.find(VF);
1323     assert(UniformsPerVF != Uniforms.end() &&
1324            "VF not yet analyzed for uniformity");
1325     return UniformsPerVF->second.count(I);
1326   }
1327 
1328   /// Returns true if \p I is known to be scalar after vectorization.
1329   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1330     if (VF.isScalar())
1331       return true;
1332 
1333     // Cost model is not run in the VPlan-native path - return conservative
1334     // result until this changes.
1335     if (EnableVPlanNativePath)
1336       return false;
1337 
1338     auto ScalarsPerVF = Scalars.find(VF);
1339     assert(ScalarsPerVF != Scalars.end() &&
1340            "Scalar values are not calculated for VF");
1341     return ScalarsPerVF->second.count(I);
1342   }
1343 
1344   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1345   /// for vectorization factor \p VF.
1346   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1347     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1348            !isProfitableToScalarize(I, VF) &&
1349            !isScalarAfterVectorization(I, VF);
1350   }
1351 
1352   /// Decision that was taken during cost calculation for memory instruction.
1353   enum InstWidening {
1354     CM_Unknown,
1355     CM_Widen,         // For consecutive accesses with stride +1.
1356     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1357     CM_Interleave,
1358     CM_GatherScatter,
1359     CM_Scalarize
1360   };
1361 
1362   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1363   /// instruction \p I and vector width \p VF.
1364   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1365                            InstructionCost Cost) {
1366     assert(VF.isVector() && "Expected VF >=2");
1367     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1368   }
1369 
1370   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1371   /// interleaving group \p Grp and vector width \p VF.
1372   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1373                            ElementCount VF, InstWidening W,
1374                            InstructionCost Cost) {
1375     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1378     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1379       if (auto *I = Grp->getMember(i)) {
1380         if (Grp->getInsertPos() == I)
1381           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1382         else
1383           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1384       }
1385     }
1386   }
1387 
1388   /// Return the cost model decision for the given instruction \p I and vector
1389   /// width \p VF. Return CM_Unknown if this instruction did not pass
1390   /// through the cost modeling.
1391   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1392     assert(VF.isVector() && "Expected VF to be a vector VF");
1393     // Cost model is not run in the VPlan-native path - return conservative
1394     // result until this changes.
1395     if (EnableVPlanNativePath)
1396       return CM_GatherScatter;
1397 
1398     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1399     auto Itr = WideningDecisions.find(InstOnVF);
1400     if (Itr == WideningDecisions.end())
1401       return CM_Unknown;
1402     return Itr->second.first;
1403   }
1404 
1405   /// Return the vectorization cost for the given instruction \p I and vector
1406   /// width \p VF.
1407   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1408     assert(VF.isVector() && "Expected VF >=2");
1409     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1410     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1411            "The cost is not calculated");
1412     return WideningDecisions[InstOnVF].second;
1413   }
1414 
1415   /// Return True if instruction \p I is an optimizable truncate whose operand
1416   /// is an induction variable. Such a truncate will be removed by adding a new
1417   /// induction variable with the destination type.
1418   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1419     // If the instruction is not a truncate, return false.
1420     auto *Trunc = dyn_cast<TruncInst>(I);
1421     if (!Trunc)
1422       return false;
1423 
1424     // Get the source and destination types of the truncate.
1425     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1426     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1427 
1428     // If the truncate is free for the given types, return false. Replacing a
1429     // free truncate with an induction variable would add an induction variable
1430     // update instruction to each iteration of the loop. We exclude from this
1431     // check the primary induction variable since it will need an update
1432     // instruction regardless.
1433     Value *Op = Trunc->getOperand(0);
1434     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1435       return false;
1436 
1437     // If the truncated value is not an induction variable, return false.
1438     return Legal->isInductionPhi(Op);
1439   }
1440 
1441   /// Collects the instructions to scalarize for each predicated instruction in
1442   /// the loop.
1443   void collectInstsToScalarize(ElementCount VF);
1444 
1445   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions that may
  /// be vectorized as interleaved, gather-scatter or scalarized accesses.
1448   void collectUniformsAndScalars(ElementCount VF) {
1449     // Do the analysis once.
1450     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1451       return;
1452     setCostBasedWideningDecision(VF);
1453     collectLoopUniforms(VF);
1454     collectLoopScalars(VF);
1455   }
1456 
1457   /// Returns true if the target machine supports masked store operation
1458   /// for the given \p DataType and kind of access to \p Ptr.
1459   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1460     return Legal->isConsecutivePtr(Ptr) &&
1461            TTI.isLegalMaskedStore(DataType, Alignment);
1462   }
1463 
1464   /// Returns true if the target machine supports masked load operation
1465   /// for the given \p DataType and kind of access to \p Ptr.
1466   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1467     return Legal->isConsecutivePtr(Ptr) &&
1468            TTI.isLegalMaskedLoad(DataType, Alignment);
1469   }
1470 
1471   /// Returns true if the target machine supports masked scatter operation
1472   /// for the given \p DataType.
1473   bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
1474     return TTI.isLegalMaskedScatter(DataType, Alignment);
1475   }
1476 
1477   /// Returns true if the target machine supports masked gather operation
1478   /// for the given \p DataType.
1479   bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
1480     return TTI.isLegalMaskedGather(DataType, Alignment);
1481   }
1482 
1483   /// Returns true if the target machine can represent \p V as a masked gather
1484   /// or scatter operation.
1485   bool isLegalGatherOrScatter(Value *V) {
1486     bool LI = isa<LoadInst>(V);
1487     bool SI = isa<StoreInst>(V);
1488     if (!LI && !SI)
1489       return false;
1490     auto *Ty = getMemInstValueType(V);
1491     Align Align = getLoadStoreAlignment(V);
1492     return (LI && isLegalMaskedGather(Ty, Align)) ||
1493            (SI && isLegalMaskedScatter(Ty, Align));
1494   }
1495 
1496   /// Returns true if the target machine supports all of the reduction
1497   /// variables found for the given VF.
1498   bool canVectorizeReductions(ElementCount VF) {
1499     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1500       RecurrenceDescriptor RdxDesc = Reduction.second;
1501       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1502     }));
1503   }
1504 
1505   /// Returns true if \p I is an instruction that will be scalarized with
1506   /// predication. Such instructions include conditional stores and
1507   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1510   bool
1511   isScalarWithPredication(Instruction *I,
1512                           ElementCount VF = ElementCount::getFixed(1)) const;
1513 
1514   // Returns true if \p I is an instruction that will be predicated either
1515   // through scalar predication or masked load/store or masked gather/scatter.
1516   // Superset of instructions that return true for isScalarWithPredication.
1517   bool isPredicatedInst(Instruction *I) {
1518     if (!blockNeedsPredication(I->getParent()))
1519       return false;
1520     // Loads and stores that need some form of masked operation are predicated
1521     // instructions.
1522     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1523       return Legal->isMaskRequired(I);
1524     return isScalarWithPredication(I);
1525   }
1526 
1527   /// Returns true if \p I is a memory instruction with consecutive memory
1528   /// access that can be widened.
1529   bool
1530   memoryInstructionCanBeWidened(Instruction *I,
1531                                 ElementCount VF = ElementCount::getFixed(1));
1532 
1533   /// Returns true if \p I is a memory instruction in an interleaved-group
1534   /// of memory accesses that can be vectorized with wide vector loads/stores
1535   /// and shuffles.
1536   bool
1537   interleavedAccessCanBeWidened(Instruction *I,
1538                                 ElementCount VF = ElementCount::getFixed(1));
1539 
1540   /// Check if \p Instr belongs to any interleaved access group.
1541   bool isAccessInterleaved(Instruction *Instr) {
1542     return InterleaveInfo.isInterleaved(Instr);
1543   }
1544 
1545   /// Get the interleaved access group that \p Instr belongs to.
1546   const InterleaveGroup<Instruction> *
1547   getInterleavedAccessGroup(Instruction *Instr) {
1548     return InterleaveInfo.getInterleaveGroup(Instr);
1549   }
1550 
1551   /// Returns true if we're required to use a scalar epilogue for at least
1552   /// the final iteration of the original loop.
1553   bool requiresScalarEpilogue() const {
1554     if (!isScalarEpilogueAllowed())
1555       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1558     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1559       return true;
1560     return InterleaveInfo.requiresScalarEpilogue();
1561   }
1562 
  /// Returns true if a scalar epilogue is allowed, i.e. not disabled due to
  /// optsize or a loop hint annotation.
1565   bool isScalarEpilogueAllowed() const {
1566     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1567   }
1568 
1569   /// Returns true if all loop blocks should be masked to fold tail loop.
1570   bool foldTailByMasking() const { return FoldTailByMasking; }
1571 
1572   bool blockNeedsPredication(BasicBlock *BB) const {
1573     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1574   }
1575 
1576   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1577   /// nodes to the chain of instructions representing the reductions. Uses a
1578   /// MapVector to ensure deterministic iteration order.
1579   using ReductionChainMap =
1580       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1581 
1582   /// Return the chain of instructions representing an inloop reduction.
1583   const ReductionChainMap &getInLoopReductionChains() const {
1584     return InLoopReductionChains;
1585   }
1586 
1587   /// Returns true if the Phi is part of an inloop reduction.
1588   bool isInLoopReduction(PHINode *Phi) const {
1589     return InLoopReductionChains.count(Phi);
1590   }
1591 
1592   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1593   /// with factor VF.  Return the cost of the instruction, including
1594   /// scalarization overhead if it's needed.
1595   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1596 
1597   /// Estimate cost of a call instruction CI if it were vectorized with factor
1598   /// VF. Return the cost of the instruction, including scalarization overhead
1599   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1602   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1603                                     bool &NeedToScalarize) const;
1604 
1605   /// Invalidates decisions already taken by the cost model.
1606   void invalidateCostModelingDecisions() {
1607     WideningDecisions.clear();
1608     Uniforms.clear();
1609     Scalars.clear();
1610   }
1611 
1612 private:
1613   unsigned NumPredStores = 0;
1614 
1615   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1616   /// than zero. One is returned if vectorization should best be avoided due
1617   /// to cost.
1618   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1619                                     ElementCount UserVF);
1620 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1628   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1629 
1630   /// Returns the expected execution cost. The unit of the cost does
1631   /// not matter because we use the 'cost' units to compare different
1632   /// vector widths. The cost that is returned is *not* normalized by
1633   /// the factor width.
1634   VectorizationCostTy expectedCost(ElementCount VF);
1635 
1636   /// Returns the execution time cost of an instruction for a given vector
1637   /// width. Vector width of one means scalar.
1638   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1639 
1640   /// The cost-computation logic from getInstructionCost which provides
1641   /// the vector type as an output parameter.
1642   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1643                                      Type *&VectorTy);
1644 
1645   /// Return the cost of instructions in an inloop reduction pattern, if I is
1646   /// part of that pattern.
1647   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1648                                           Type *VectorTy,
1649                                           TTI::TargetCostKind CostKind);
1650 
1651   /// Calculate vectorization cost of memory instruction \p I.
1652   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1653 
1654   /// The cost computation for scalarized memory instruction.
1655   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1656 
1657   /// The cost computation for interleaving group of memory instructions.
1658   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1659 
1660   /// The cost computation for Gather/Scatter instruction.
1661   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1662 
1663   /// The cost computation for widening instruction \p I with consecutive
1664   /// memory access.
1665   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1666 
1667   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1668   /// Load: scalar load + broadcast.
1669   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1670   /// element)
1671   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1672 
1673   /// Estimate the overhead of scalarizing an instruction. This is a
1674   /// convenience wrapper for the type-based getScalarizationOverhead API.
1675   InstructionCost getScalarizationOverhead(Instruction *I,
1676                                            ElementCount VF) const;
1677 
  /// Returns whether the instruction is a load or store and will be emitted
1679   /// as a vector operation.
1680   bool isConsecutiveLoadOrStore(Instruction *I);
1681 
1682   /// Returns true if an artificially high cost for emulated masked memrefs
1683   /// should be used.
1684   bool useEmulatedMaskMemRefHack(Instruction *I);
1685 
1686   /// Map of scalar integer values to the smallest bitwidth they can be legally
1687   /// represented as. The vector equivalents of these values should be truncated
1688   /// to this type.
1689   MapVector<Instruction *, uint64_t> MinBWs;
1690 
1691   /// A type representing the costs for instructions if they were to be
1692   /// scalarized rather than vectorized. The entries are Instruction-Cost
1693   /// pairs.
1694   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1695 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1698   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1699 
1700   /// Records whether it is allowed to have the original scalar loop execute at
1701   /// least once. This may be needed as a fallback loop in case runtime
1702   /// aliasing/dependence checks fail, or to handle the tail/remainder
1703   /// iterations when the trip count is unknown or doesn't divide by the VF,
1704   /// or as a peel-loop to handle gaps in interleave-groups.
1705   /// Under optsize and when the trip count is very small we don't allow any
1706   /// iterations to execute in the scalar loop.
1707   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1708 
1709   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1710   bool FoldTailByMasking = false;
1711 
1712   /// A map holding scalar costs for different vectorization factors. The
1713   /// presence of a cost for an instruction in the mapping indicates that the
1714   /// instruction will be scalarized when vectorizing with the associated
1715   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1716   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1717 
1718   /// Holds the instructions known to be uniform after vectorization.
1719   /// The data is collected per VF.
1720   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1721 
1722   /// Holds the instructions known to be scalar after vectorization.
1723   /// The data is collected per VF.
1724   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1725 
1726   /// Holds the instructions (address computations) that are forced to be
1727   /// scalarized.
1728   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1729 
1730   /// PHINodes of the reductions that should be expanded in-loop along with
1731   /// their associated chains of reduction operations, in program order from top
1732   /// (PHI) to bottom
1733   ReductionChainMap InLoopReductionChains;
1734 
1735   /// A Map of inloop reduction operations and their immediate chain operand.
1736   /// FIXME: This can be removed once reductions can be costed correctly in
1737   /// vplan. This was added to allow quick lookup to the inloop operations,
1738   /// without having to loop through InLoopReductionChains.
1739   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1740 
1741   /// Returns the expected difference in cost from scalarizing the expression
1742   /// feeding a predicated instruction \p PredInst. The instructions to
1743   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1744   /// non-negative return value implies the expression will be scalarized.
1745   /// Currently, only single-use chains are considered for scalarization.
1746   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1747                               ElementCount VF);
1748 
1749   /// Collect the instructions that are uniform after vectorization. An
1750   /// instruction is uniform if we represent it with a single scalar value in
1751   /// the vectorized loop corresponding to each vector iteration. Examples of
1752   /// uniform instructions include pointer operands of consecutive or
1753   /// interleaved memory accesses. Note that although uniformity implies an
1754   /// instruction will be scalar, the reverse is not true. In general, a
1755   /// scalarized instruction will be represented by VF scalar values in the
1756   /// vectorized loop, each corresponding to an iteration of the original
1757   /// scalar loop.
1758   void collectLoopUniforms(ElementCount VF);
1759 
1760   /// Collect the instructions that are scalar after vectorization. An
1761   /// instruction is scalar if it is known to be uniform or will be scalarized
1762   /// during vectorization. Non-uniform scalarized instructions will be
1763   /// represented by VF values in the vectorized loop, each corresponding to an
1764   /// iteration of the original scalar loop.
1765   void collectLoopScalars(ElementCount VF);
1766 
1767   /// Keeps cost model vectorization decision and cost for instructions.
1768   /// Right now it is used for memory instructions only.
1769   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1770                                 std::pair<InstWidening, InstructionCost>>;
1771 
1772   DecisionList WideningDecisions;
1773 
1774   /// Returns true if \p V is expected to be vectorized and it needs to be
1775   /// extracted.
1776   bool needsExtract(Value *V, ElementCount VF) const {
1777     Instruction *I = dyn_cast<Instruction>(V);
1778     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1779         TheLoop->isLoopInvariant(I))
1780       return false;
1781 
1782     // Assume we can vectorize V (and hence we need extraction) if the
1783     // scalars are not computed yet. This can happen, because it is called
1784     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1785     // the scalars are collected. That should be a safe assumption in most
1786     // cases, because we check if the operands have vectorizable types
1787     // beforehand in LoopVectorizationLegality.
1788     return Scalars.find(VF) == Scalars.end() ||
1789            !isScalarAfterVectorization(I, VF);
  }
1791 
1792   /// Returns a range containing only operands needing to be extracted.
1793   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1794                                                    ElementCount VF) const {
1795     return SmallVector<Value *, 4>(make_filter_range(
1796         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1797   }
1798 
1799   /// Determines if we have the infrastructure to vectorize loop \p L and its
1800   /// epilogue, assuming the main loop is vectorized by \p VF.
1801   bool isCandidateForEpilogueVectorization(const Loop &L,
1802                                            const ElementCount VF) const;
1803 
1804   /// Returns true if epilogue vectorization is considered profitable, and
1805   /// false otherwise.
1806   /// \p VF is the vectorization factor chosen for the original loop.
1807   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1808 
1809 public:
1810   /// The loop that we evaluate.
1811   Loop *TheLoop;
1812 
1813   /// Predicated scalar evolution analysis.
1814   PredicatedScalarEvolution &PSE;
1815 
1816   /// Loop Info analysis.
1817   LoopInfo *LI;
1818 
1819   /// Vectorization legality.
1820   LoopVectorizationLegality *Legal;
1821 
1822   /// Vector target information.
1823   const TargetTransformInfo &TTI;
1824 
1825   /// Target Library Info.
1826   const TargetLibraryInfo *TLI;
1827 
1828   /// Demanded bits analysis.
1829   DemandedBits *DB;
1830 
1831   /// Assumption cache.
1832   AssumptionCache *AC;
1833 
1834   /// Interface to emit optimization remarks.
1835   OptimizationRemarkEmitter *ORE;
1836 
1837   const Function *TheFunction;
1838 
1839   /// Loop Vectorize Hint.
1840   const LoopVectorizeHints *Hints;
1841 
1842   /// The interleave access information contains groups of interleaved accesses
1843   /// with the same stride and close to each other.
1844   InterleavedAccessInfo &InterleaveInfo;
1845 
1846   /// Values to ignore in the cost model.
1847   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1848 
1849   /// Values to ignore in the cost model when VF > 1.
1850   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1851 
1852   /// Profitable vector factors.
1853   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1854 };
1855 } // end namespace llvm
1856 
1857 /// Helper struct to manage generating runtime checks for vectorization.
1858 ///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better estimation of their cost. After deciding
/// to vectorize, the checks are moved back. If deciding not to vectorize, the
/// temporary blocks are completely removed.
1863 class GeneratedRTChecks {
1864   /// Basic block which contains the generated SCEV checks, if any.
1865   BasicBlock *SCEVCheckBlock = nullptr;
1866 
1867   /// The value representing the result of the generated SCEV checks. If it is
1868   /// nullptr, either no SCEV checks have been generated or they have been used.
1869   Value *SCEVCheckCond = nullptr;
1870 
1871   /// Basic block which contains the generated memory runtime checks, if any.
1872   BasicBlock *MemCheckBlock = nullptr;
1873 
1874   /// The value representing the result of the generated memory runtime checks.
1875   /// If it is nullptr, either no memory runtime checks have been generated or
1876   /// they have been used.
1877   Instruction *MemRuntimeCheckCond = nullptr;
1878 
1879   DominatorTree *DT;
1880   LoopInfo *LI;
1881 
1882   SCEVExpander SCEVExp;
1883   SCEVExpander MemCheckExp;
1884 
1885 public:
1886   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1887                     const DataLayout &DL)
1888       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1889         MemCheckExp(SE, DL, "scev.check") {}
1890 
1891   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1892   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and added back during vector code generation. If
1894   /// there is no vector code generation, the check blocks are removed
1895   /// completely.
1896   void Create(Loop *L, const LoopAccessInfo &LAI,
1897               const SCEVUnionPredicate &UnionPred) {
1898 
1899     BasicBlock *LoopHeader = L->getHeader();
1900     BasicBlock *Preheader = L->getLoopPreheader();
1901 
1902     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1903     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1904     // may be used by SCEVExpander. The blocks will be un-linked from their
1905     // predecessors and removed from LI & DT at the end of the function.
1906     if (!UnionPred.isAlwaysTrue()) {
1907       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1908                                   nullptr, "vector.scevcheck");
1909 
1910       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1911           &UnionPred, SCEVCheckBlock->getTerminator());
1912     }
1913 
1914     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1915     if (RtPtrChecking.Need) {
1916       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1917       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1918                                  "vector.memcheck");
1919 
1920       std::tie(std::ignore, MemRuntimeCheckCond) =
1921           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1922                            RtPtrChecking.getChecks(), MemCheckExp);
1923       assert(MemRuntimeCheckCond &&
1924              "no RT checks generated although RtPtrChecking "
1925              "claimed checks are required");
1926     }
1927 
1928     if (!MemCheckBlock && !SCEVCheckBlock)
1929       return;
1930 
1931     // Unhook the temporary block with the checks, update various places
1932     // accordingly.
1933     if (SCEVCheckBlock)
1934       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1935     if (MemCheckBlock)
1936       MemCheckBlock->replaceAllUsesWith(Preheader);
1937 
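    // Move the terminators back into the preheader so that it again branches
    // directly to the loop, and give each detached check block an
    // 'unreachable' terminator to keep the IR well-formed until the block is
    // either re-linked by emitSCEVChecks/emitMemRuntimeChecks or deleted in
    // the destructor.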
1938     if (SCEVCheckBlock) {
1939       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1940       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1941       Preheader->getTerminator()->eraseFromParent();
1942     }
1943     if (MemCheckBlock) {
1944       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1945       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1946       Preheader->getTerminator()->eraseFromParent();
1947     }
1948 
1949     DT->changeImmediateDominator(LoopHeader, Preheader);
1950     if (MemCheckBlock) {
1951       DT->eraseNode(MemCheckBlock);
1952       LI->removeBlock(MemCheckBlock);
1953     }
1954     if (SCEVCheckBlock) {
1955       DT->eraseNode(SCEVCheckBlock);
1956       LI->removeBlock(SCEVCheckBlock);
1957     }
1958   }
1959 
1960   /// Remove the created SCEV & memory runtime check blocks & instructions, if
1961   /// unused.
1962   ~GeneratedRTChecks() {
1963     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
1964     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
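    // A null condition means the corresponding checks were either never
    // generated or have already been materialized by emitSCEVChecks /
    // emitMemRuntimeChecks; in both cases the expanded values must be kept.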
1965     if (!SCEVCheckCond)
1966       SCEVCleaner.markResultUsed();
1967 
1968     if (!MemRuntimeCheckCond)
1969       MemCheckCleaner.markResultUsed();
1970 
1971     if (MemRuntimeCheckCond) {
1972       auto &SE = *MemCheckExp.getSE();
1973       // Memory runtime check generation creates compares that use expanded
1974       // values. Remove them before running the SCEVExpanderCleaners.
1975       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1976         if (MemCheckExp.isInsertedInstruction(&I))
1977           continue;
1978         SE.forgetValue(&I);
1979         SE.eraseValueFromMap(&I);
1980         I.eraseFromParent();
1981       }
1982     }
1983     MemCheckCleaner.cleanup();
1984     SCEVCleaner.cleanup();
1985 
1986     if (SCEVCheckCond)
1987       SCEVCheckBlock->eraseFromParent();
1988     if (MemRuntimeCheckCond)
1989       MemCheckBlock->eraseFromParent();
1990   }
1991 
1992   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
1993   /// adjusts the branches to branch to the vector preheader or \p Bypass,
1994   /// depending on the generated condition.
1995   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
1996                              BasicBlock *LoopVectorPreHeader,
1997                              BasicBlock *LoopExitBlock) {
1998     if (!SCEVCheckCond)
1999       return nullptr;
2000     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2001       if (C->isZero())
2002         return nullptr;
2003 
2004     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2005 
2006     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2007     // Create new preheader for vector loop.
2008     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2009       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2010 
2011     SCEVCheckBlock->getTerminator()->eraseFromParent();
2012     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2013     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2014                                                 SCEVCheckBlock);
2015 
2016     DT->addNewBlock(SCEVCheckBlock, Pred);
2017     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2018 
2019     ReplaceInstWithInst(
2020         SCEVCheckBlock->getTerminator(),
2021         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2022     // Mark the check as used, to prevent it from being removed during cleanup.
2023     SCEVCheckCond = nullptr;
2024     return SCEVCheckBlock;
2025   }
2026 
2027   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2028   /// the branches to branch to the vector preheader or \p Bypass, depending on
2029   /// the generated condition.
2030   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2031                                    BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime if arrays overlap.
2033     if (!MemRuntimeCheckCond)
2034       return nullptr;
2035 
2036     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2037     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2038                                                 MemCheckBlock);
2039 
2040     DT->addNewBlock(MemCheckBlock, Pred);
2041     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2042     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2043 
2044     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2045       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2046 
2047     ReplaceInstWithInst(
2048         MemCheckBlock->getTerminator(),
2049         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2050     MemCheckBlock->getTerminator()->setDebugLoc(
2051         Pred->getTerminator()->getDebugLoc());
2052 
2053     // Mark the check as used, to prevent it from being removed during cleanup.
2054     MemRuntimeCheckCond = nullptr;
2055     return MemCheckBlock;
2056   }
2057 };
2058 
2059 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2060 // vectorization. The loop needs to be annotated with #pragma omp simd
2061 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2062 // vector length information is not provided, vectorization is not considered
2063 // explicit. Interleave hints are not allowed either. These limitations will be
2064 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2066 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2067 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2068 // provides *explicit vectorization hints* (LV can bypass legal checks and
2069 // assume that vectorization is legal). However, both hints are implemented
2070 // using the same metadata (llvm.loop.vectorize, processed by
2071 // LoopVectorizeHints). This will be fixed in the future when the native IR
2072 // representation for pragma 'omp simd' is introduced.
2073 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2074                                    OptimizationRemarkEmitter *ORE) {
2075   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2076   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2077 
2078   // Only outer loops with an explicit vectorization hint are supported.
2079   // Unannotated outer loops are ignored.
2080   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2081     return false;
2082 
2083   Function *Fn = OuterLp->getHeader()->getParent();
2084   if (!Hints.allowVectorization(Fn, OuterLp,
2085                                 true /*VectorizeOnlyWhenForced*/)) {
2086     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2087     return false;
2088   }
2089 
2090   if (Hints.getInterleave() > 1) {
2091     // TODO: Interleave support is future work.
2092     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2093                          "outer loops.\n");
2094     Hints.emitRemarkWithHints();
2095     return false;
2096   }
2097 
2098   return true;
2099 }
2100 
2101 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2102                                   OptimizationRemarkEmitter *ORE,
2103                                   SmallVectorImpl<Loop *> &V) {
2104   // Collect inner loops and outer loops without irreducible control flow. For
2105   // now, only collect outer loops that have explicit vectorization hints. If we
2106   // are stress testing the VPlan H-CFG construction, we collect the outermost
2107   // loop of every loop nest.
2108   if (L.isInnermost() || VPlanBuildStressTest ||
2109       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2110     LoopBlocksRPO RPOT(&L);
2111     RPOT.perform(LI);
2112     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2113       V.push_back(&L);
2114       // TODO: Collect inner loops inside marked outer loops in case
2115       // vectorization fails for the outer loop. Do not invoke
2116       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2117       // already known to be reducible. We can use an inherited attribute for
2118       // that.
2119       return;
2120     }
2121   }
2122   for (Loop *InnerL : L)
2123     collectSupportedLoops(*InnerL, LI, ORE, V);
2124 }
2125 
2126 namespace {
2127 
2128 /// The LoopVectorize Pass.
2129 struct LoopVectorize : public FunctionPass {
2130   /// Pass identification, replacement for typeid
2131   static char ID;
2132 
2133   LoopVectorizePass Impl;
2134 
2135   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2136                          bool VectorizeOnlyWhenForced = false)
2137       : FunctionPass(ID),
2138         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2139     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2140   }
2141 
2142   bool runOnFunction(Function &F) override {
2143     if (skipFunction(F))
2144       return false;
2145 
2146     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2147     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2148     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2149     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2150     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2151     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2152     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2153     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2154     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2155     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2156     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2157     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2158     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2159 
2160     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2161         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2162 
2163     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2164                         GetLAA, *ORE, PSI).MadeAnyChange;
2165   }
2166 
2167   void getAnalysisUsage(AnalysisUsage &AU) const override {
2168     AU.addRequired<AssumptionCacheTracker>();
2169     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2170     AU.addRequired<DominatorTreeWrapperPass>();
2171     AU.addRequired<LoopInfoWrapperPass>();
2172     AU.addRequired<ScalarEvolutionWrapperPass>();
2173     AU.addRequired<TargetTransformInfoWrapperPass>();
2174     AU.addRequired<AAResultsWrapperPass>();
2175     AU.addRequired<LoopAccessLegacyAnalysis>();
2176     AU.addRequired<DemandedBitsWrapperPass>();
2177     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2178     AU.addRequired<InjectTLIMappingsLegacy>();
2179 
2180     // We currently do not preserve loopinfo/dominator analyses with outer loop
2181     // vectorization. Until this is addressed, mark these analyses as preserved
2182     // only for non-VPlan-native path.
2183     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2184     if (!EnableVPlanNativePath) {
2185       AU.addPreserved<LoopInfoWrapperPass>();
2186       AU.addPreserved<DominatorTreeWrapperPass>();
2187     }
2188 
2189     AU.addPreserved<BasicAAWrapperPass>();
2190     AU.addPreserved<GlobalsAAWrapperPass>();
2191     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2192   }
2193 };
2194 
2195 } // end anonymous namespace
2196 
2197 //===----------------------------------------------------------------------===//
2198 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2199 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2200 //===----------------------------------------------------------------------===//
2201 
2202 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2203   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2206   Instruction *Instr = dyn_cast<Instruction>(V);
2207   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2208                      (!Instr ||
2209                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2210   // Place the code for broadcasting invariant variables in the new preheader.
2211   IRBuilder<>::InsertPointGuard Guard(Builder);
2212   if (SafeToHoist)
2213     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2214 
2215   // Broadcast the scalar into all locations in the vector.
2216   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2217 
2218   return Shuf;
2219 }
2220 
2221 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2222     const InductionDescriptor &II, Value *Step, Value *Start,
2223     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2224     VPTransformState &State) {
2225   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2226          "Expected either an induction phi-node or a truncate of it!");
2227 
2228   // Construct the initial value of the vector IV in the vector loop preheader
2229   auto CurrIP = Builder.saveIP();
2230   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2231   if (isa<TruncInst>(EntryVal)) {
2232     assert(Start->getType()->isIntegerTy() &&
2233            "Truncation requires an integer type");
2234     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2235     Step = Builder.CreateTrunc(Step, TruncType);
2236     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2237   }
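  // Broadcast the start value and advance each vector lane by a multiple of
  // the step: lane i of SteppedStart holds Start combined with i * Step using
  // the induction's binary opcode (plain add for integer inductions).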
2238   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2239   Value *SteppedStart =
2240       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2241 
2242   // We create vector phi nodes for both integer and floating-point induction
2243   // variables. Here, we determine the kind of arithmetic we will perform.
2244   Instruction::BinaryOps AddOp;
2245   Instruction::BinaryOps MulOp;
2246   if (Step->getType()->isIntegerTy()) {
2247     AddOp = Instruction::Add;
2248     MulOp = Instruction::Mul;
2249   } else {
2250     AddOp = II.getInductionOpcode();
2251     MulOp = Instruction::FMul;
2252   }
2253 
2254   // Multiply the vectorization factor by the step using integer or
2255   // floating-point arithmetic as appropriate.
2256   Type *StepType = Step->getType();
2257   if (Step->getType()->isFloatingPointTy())
2258     StepType = IntegerType::get(StepType->getContext(),
2259                                 StepType->getScalarSizeInBits());
2260   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2261   if (Step->getType()->isFloatingPointTy())
2262     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2263   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
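  // Mul is the total induction step over one vector iteration (VF * Step);
  // splatting it below gives the per-part increment added to the vector IV.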
2264 
2265   // Create a vector splat to use in the induction update.
2266   //
2267   // FIXME: If the step is non-constant, we create the vector splat with
2268   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2269   //        handle a constant vector splat.
2270   Value *SplatVF = isa<Constant>(Mul)
2271                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2272                        : Builder.CreateVectorSplat(VF, Mul);
2273   Builder.restoreIP(CurrIP);
2274 
2275   // We may need to add the step a number of times, depending on the unroll
2276   // factor. The last of those goes into the PHI.
2277   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2278                                     &*LoopVectorBody->getFirstInsertionPt());
2279   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2280   Instruction *LastInduction = VecInd;
2281   for (unsigned Part = 0; Part < UF; ++Part) {
2282     State.set(Def, LastInduction, Part);
2283 
2284     if (isa<TruncInst>(EntryVal))
2285       addMetadata(LastInduction, EntryVal);
2286     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2287                                           State, Part);
2288 
2289     LastInduction = cast<Instruction>(
2290         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2291     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2292   }
2293 
2294   // Move the last step to the end of the latch block. This ensures consistent
2295   // placement of all induction updates.
2296   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2297   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2298   auto *ICmp = cast<Instruction>(Br->getCondition());
2299   LastInduction->moveBefore(ICmp);
2300   LastInduction->setName("vec.ind.next");
2301 
2302   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2303   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2304 }
2305 
2306 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2307   return Cost->isScalarAfterVectorization(I, VF) ||
2308          Cost->isProfitableToScalarize(I, VF);
2309 }
2310 
2311 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2312   if (shouldScalarizeInstruction(IV))
2313     return true;
2314   auto isScalarInst = [&](User *U) -> bool {
2315     auto *I = cast<Instruction>(U);
2316     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2317   };
2318   return llvm::any_of(IV->users(), isScalarInst);
2319 }
2320 
2321 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2322     const InductionDescriptor &ID, const Instruction *EntryVal,
2323     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2324     unsigned Part, unsigned Lane) {
2325   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2326          "Expected either an induction phi-node or a truncate of it!");
2327 
2328   // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't
  // have to do any recording in this case - that is done when the original IV
  // is processed.
2334   if (isa<TruncInst>(EntryVal))
2335     return;
2336 
2337   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2338   if (Casts.empty())
2339     return;
2340   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any) have no uses outside the
2342   // induction update chain itself.
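  // A Lane of UINT_MAX is a sentinel meaning "no particular lane": record the
  // value for the whole unroll part rather than for a single VPIteration.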
2343   if (Lane < UINT_MAX)
2344     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2345   else
2346     State.set(CastDef, VectorLoopVal, Part);
2347 }
2348 
2349 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2350                                                 TruncInst *Trunc, VPValue *Def,
2351                                                 VPValue *CastDef,
2352                                                 VPTransformState &State) {
2353   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2354          "Primary induction variable must have an integer type");
2355 
2356   auto II = Legal->getInductionVars().find(IV);
2357   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2358 
2359   auto ID = II->second;
2360   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2361 
2362   // The value from the original loop to which we are mapping the new induction
2363   // variable.
2364   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2365 
2366   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2367 
2368   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2370   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2371     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2372            "Induction step should be loop invariant");
2373     if (PSE.getSE()->isSCEVable(IV->getType())) {
2374       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2375       return Exp.expandCodeFor(Step, Step->getType(),
2376                                LoopVectorPreHeader->getTerminator());
2377     }
2378     return cast<SCEVUnknown>(Step)->getValue();
2379   };
2380 
2381   // The scalar value to broadcast. This is derived from the canonical
2382   // induction variable. If a truncation type is given, truncate the canonical
2383   // induction variable and step. Otherwise, derive these values from the
2384   // induction descriptor.
2385   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2386     Value *ScalarIV = Induction;
2387     if (IV != OldInduction) {
2388       ScalarIV = IV->getType()->isIntegerTy()
2389                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2390                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2391                                           IV->getType());
2392       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2393       ScalarIV->setName("offset.idx");
2394     }
2395     if (Trunc) {
2396       auto *TruncType = cast<IntegerType>(Trunc->getType());
2397       assert(Step->getType()->isIntegerTy() &&
2398              "Truncation requires an integer step");
2399       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2400       Step = Builder.CreateTrunc(Step, TruncType);
2401     }
2402     return ScalarIV;
2403   };
2404 
  // Create the vector values from the scalar IV, used when we do not create a
  // vector IV.
2407   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2408     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2409     for (unsigned Part = 0; Part < UF; ++Part) {
2410       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2411       Value *EntryPart =
2412           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2413                         ID.getInductionOpcode());
2414       State.set(Def, EntryPart, Part);
2415       if (Trunc)
2416         addMetadata(EntryPart, Trunc);
2417       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2418                                             State, Part);
2419     }
2420   };
2421 
2422   // Fast-math-flags propagate from the original induction instruction.
2423   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2424   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2425     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2426 
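  // There are four possible outcomes below: a scalar VF only needs the splat
  // of the scalar IV; if no user needs a scalar IV we create just the vector
  // IV; if the IV itself is widened we create the vector IV plus scalar steps
  // for its scalarized users; otherwise we emit only the scalar steps (plus
  // the splat IV when folding the tail, as explained below).
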
2427   // Now do the actual transformations, and start with creating the step value.
2428   Value *Step = CreateStepValue(ID.getStep());
2429   if (VF.isZero() || VF.isScalar()) {
2430     Value *ScalarIV = CreateScalarIV(Step);
2431     CreateSplatIV(ScalarIV, Step);
2432     return;
2433   }
2434 
2435   // Determine if we want a scalar version of the induction variable. This is
2436   // true if the induction variable itself is not widened, or if it has at
2437   // least one user in the loop that is not widened.
2438   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2439   if (!NeedsScalarIV) {
2440     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2441                                     State);
2442     return;
2443   }
2444 
2445   // Try to create a new independent vector induction variable. If we can't
2446   // create the phi node, we will splat the scalar induction variable in each
2447   // loop iteration.
2448   if (!shouldScalarizeInstruction(EntryVal)) {
2449     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2450                                     State);
2451     Value *ScalarIV = CreateScalarIV(Step);
2452     // Create scalar steps that can be used by instructions we will later
2453     // scalarize. Note that the addition of the scalar steps will not increase
2454     // the number of instructions in the loop in the common case prior to
2455     // InstCombine. We will be trading one vector extract for each scalar step.
2456     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2457     return;
2458   }
2459 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we fold the tail: then the splat IV feeds the
  // predicate used by the masked loads/stores.
2463   Value *ScalarIV = CreateScalarIV(Step);
2464   if (!Cost->isScalarEpilogueAllowed())
2465     CreateSplatIV(ScalarIV, Step);
2466   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2467 }
2468 
2469 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2470                                           Instruction::BinaryOps BinOp) {
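  // The result is a vector whose lane i is Val[i] + (StartIdx + i) * Step for
  // integer inductions, or Val[i] BinOp ((StartIdx + i) * Step) for FP
  // inductions.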
2471   // Create and check the types.
2472   auto *ValVTy = cast<VectorType>(Val->getType());
2473   ElementCount VLen = ValVTy->getElementCount();
2474 
2475   Type *STy = Val->getType()->getScalarType();
2476   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2477          "Induction Step must be an integer or FP");
2478   assert(Step->getType() == STy && "Step has wrong type");
2479 
2480   SmallVector<Constant *, 8> Indices;
2481 
2482   // Create a vector of consecutive numbers from zero to VF.
2483   VectorType *InitVecValVTy = ValVTy;
2484   Type *InitVecValSTy = STy;
2485   if (STy->isFloatingPointTy()) {
2486     InitVecValSTy =
2487         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2488     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2489   }
2490   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2491 
2492   // Add on StartIdx
2493   Value *StartIdxSplat = Builder.CreateVectorSplat(
2494       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2495   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2496 
2497   if (STy->isIntegerTy()) {
2498     Step = Builder.CreateVectorSplat(VLen, Step);
2499     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
2502     Step = Builder.CreateMul(InitVec, Step);
2503     return Builder.CreateAdd(Val, Step, "induction");
2504   }
2505 
2506   // Floating point induction.
2507   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2508          "Binary Opcode should be specified for FP induction");
2509   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2510   Step = Builder.CreateVectorSplat(VLen, Step);
2511   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2512   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2513 }
2514 
2515 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2516                                            Instruction *EntryVal,
2517                                            const InductionDescriptor &ID,
2518                                            VPValue *Def, VPValue *CastDef,
2519                                            VPTransformState &State) {
2520   // We shouldn't have to build scalar steps if we aren't vectorizing.
2521   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2523   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2524   assert(ScalarIVTy == Step->getType() &&
2525          "Val and Step should have the same type");
2526 
2527   // We build scalar steps for both integer and floating-point induction
2528   // variables. Here, we determine the kind of arithmetic we will perform.
2529   Instruction::BinaryOps AddOp;
2530   Instruction::BinaryOps MulOp;
2531   if (ScalarIVTy->isIntegerTy()) {
2532     AddOp = Instruction::Add;
2533     MulOp = Instruction::Mul;
2534   } else {
2535     AddOp = ID.getInductionOpcode();
2536     MulOp = Instruction::FMul;
2537   }
2538 
2539   // Determine the number of scalars we need to generate for each unroll
2540   // iteration. If EntryVal is uniform, we only need to generate the first
2541   // lane. Otherwise, we generate all VF values.
2542   bool IsUniform =
2543       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2544   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2545   // Compute the scalar steps and save the results in State.
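  // For part P and lane L the scalar step is ScalarIV + (P * VF + L) * Step,
  // computed with the AddOp/MulOp selected above.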
2546   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2547                                      ScalarIVTy->getScalarSizeInBits());
2548   Type *VecIVTy = nullptr;
2549   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2550   if (!IsUniform && VF.isScalable()) {
2551     VecIVTy = VectorType::get(ScalarIVTy, VF);
2552     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2553     SplatStep = Builder.CreateVectorSplat(VF, Step);
2554     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2555   }
2556 
2557   for (unsigned Part = 0; Part < UF; ++Part) {
2558     Value *StartIdx0 =
2559         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2560 
2561     if (!IsUniform && VF.isScalable()) {
2562       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2563       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2564       if (ScalarIVTy->isFloatingPointTy())
2565         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2566       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2567       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2568       State.set(Def, Add, Part);
2569       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2570                                             Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements, so we do that below. This improves the code quality when
      // trying to extract the first element, for example.
2574     }
2575 
2576     if (ScalarIVTy->isFloatingPointTy())
2577       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2578 
2579     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2580       Value *StartIdx = Builder.CreateBinOp(
2581           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2582       // The step returned by `createStepForVF` is a runtime-evaluated value
2583       // when VF is scalable. Otherwise, it should be folded into a Constant.
2584       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2585              "Expected StartIdx to be folded to a constant when VF is not "
2586              "scalable");
2587       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2588       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2589       State.set(Def, Add, VPIteration(Part, Lane));
2590       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2591                                             Part, Lane);
2592     }
2593   }
2594 }
2595 
2596 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2597                                                     const VPIteration &Instance,
2598                                                     VPTransformState &State) {
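  // Insert the scalar value computed for this lane into the corresponding
  // per-part vector value and record the updated vector in State.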
2599   Value *ScalarInst = State.get(Def, Instance);
2600   Value *VectorValue = State.get(Def, Instance.Part);
2601   VectorValue = Builder.CreateInsertElement(
2602       VectorValue, ScalarInst,
2603       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2604   State.set(Def, VectorValue, Instance.Part);
2605 }
2606 
2607 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2608   assert(Vec->getType()->isVectorTy() && "Invalid type");
2609   return Builder.CreateVectorReverse(Vec, "reverse");
2610 }
2611 
2612 // Return whether we allow using masked interleave-groups (for dealing with
2613 // strided loads/stores that reside in predicated blocks, or for dealing
2614 // with gaps).
2615 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2616   // If an override option has been passed in for interleaved accesses, use it.
2617   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2618     return EnableMaskedInterleavedMemAccesses;
2619 
2620   return TTI.enableMaskedInterleavedAccessVectorization();
2621 }
2622 
2623 // Try to vectorize the interleave group that \p Instr belongs to.
2624 //
2625 // E.g. Translate following interleaved load group (factor = 3):
2626 //   for (i = 0; i < N; i+=3) {
2627 //     R = Pic[i];             // Member of index 0
2628 //     G = Pic[i+1];           // Member of index 1
2629 //     B = Pic[i+2];           // Member of index 2
2630 //     ... // do something to R, G, B
2631 //   }
2632 // To:
2633 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2634 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2635 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2636 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2637 //
2638 // Or translate following interleaved store group (factor = 3):
2639 //   for (i = 0; i < N; i+=3) {
2640 //     ... do something to R, G, B
2641 //     Pic[i]   = R;           // Member of index 0
2642 //     Pic[i+1] = G;           // Member of index 1
2643 //     Pic[i+2] = B;           // Member of index 2
2644 //   }
2645 // To:
2646 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2647 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2648 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2649 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2650 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2651 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2652     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2653     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2654     VPValue *BlockInMask) {
2655   Instruction *Instr = Group->getInsertPos();
2656   const DataLayout &DL = Instr->getModule()->getDataLayout();
2657 
2658   // Prepare for the vector type of the interleaved load/store.
2659   Type *ScalarTy = getMemInstValueType(Instr);
2660   unsigned InterleaveFactor = Group->getFactor();
2661   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2662   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2663 
2664   // Prepare for the new pointers.
2665   SmallVector<Value *, 2> AddrParts;
2666   unsigned Index = Group->getIndex(Instr);
2667 
2668   // TODO: extend the masked interleaved-group support to reversed access.
2669   assert((!BlockInMask || !Group->isReverse()) &&
2670          "Reversed masked interleave-group not supported.");
2671 
2672   // If the group is reverse, adjust the index to refer to the last vector lane
2673   // instead of the first. We adjust the index from the first vector lane,
2674   // rather than directly getting the pointer for lane VF - 1, because the
2675   // pointer operand of the interleaved access is supposed to be uniform. For
2676   // uniform instructions, we're only required to generate a value for the
2677   // first vector lane in each unroll iteration.
2678   assert(!VF.isScalable() &&
2679          "scalable vector reverse operation is not implemented");
2680   if (Group->isReverse())
2681     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2682 
2683   for (unsigned Part = 0; Part < UF; Part++) {
2684     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2685     setDebugLocFromInst(Builder, AddrPart);
2686 
    // Note that the current instruction could be at any member index, so we
    // need to adjust the address to the member of index 0.
2689     //
2690     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2691     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2693     //
2694     // E.g.  A[i+1] = a;     // Member of index 1
2695     //       A[i]   = b;     // Member of index 0
2696     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2698 
2699     bool InBounds = false;
2700     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2701       InBounds = gep->isInBounds();
2702     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2703     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2704 
2705     // Cast to the vector pointer type.
2706     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2707     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2708     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2709   }
2710 
2711   setDebugLocFromInst(Builder, Instr);
2712   Value *PoisonVec = PoisonValue::get(VecTy);
2713 
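  // If the group has gaps and we cannot rely on a scalar epilogue to handle
  // the accesses to the missing members, build a mask that disables the
  // vector lanes corresponding to those members.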
2714   Value *MaskForGaps = nullptr;
2715   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2716     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2717     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2718     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2719   }
2720 
2721   // Vectorize the interleaved load group.
2722   if (isa<LoadInst>(Instr)) {
2723     // For each unroll part, create a wide load for the group.
2724     SmallVector<Value *, 2> NewLoads;
2725     for (unsigned Part = 0; Part < UF; Part++) {
2726       Instruction *NewLoad;
2727       if (BlockInMask || MaskForGaps) {
2728         assert(useMaskedInterleavedAccesses(*TTI) &&
2729                "masked interleaved groups are not allowed.");
2730         Value *GroupMask = MaskForGaps;
2731         if (BlockInMask) {
2732           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2733           assert(!VF.isScalable() && "scalable vectors not yet supported.");
2734           Value *ShuffledMask = Builder.CreateShuffleVector(
2735               BlockInMaskPart,
2736               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2737               "interleaved.mask");
2738           GroupMask = MaskForGaps
2739                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2740                                                 MaskForGaps)
2741                           : ShuffledMask;
2742         }
2743         NewLoad =
2744             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2745                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
2748         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2749                                             Group->getAlign(), "wide.vec");
2750       Group->addMetadata(NewLoad);
2751       NewLoads.push_back(NewLoad);
2752     }
2753 
2754     // For each member in the group, shuffle out the appropriate data from the
2755     // wide loads.
2756     unsigned J = 0;
2757     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2758       Instruction *Member = Group->getMember(I);
2759 
2760       // Skip the gaps in the group.
2761       if (!Member)
2762         continue;
2763 
2764       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2765       auto StrideMask =
2766           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2767       for (unsigned Part = 0; Part < UF; Part++) {
2768         Value *StridedVec = Builder.CreateShuffleVector(
2769             NewLoads[Part], StrideMask, "strided.vec");
2770 
        // If this member has a different type, cast the result type.
2772         if (Member->getType() != ScalarTy) {
2773           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2774           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2775           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2776         }
2777 
2778         if (Group->isReverse())
2779           StridedVec = reverseVector(StridedVec);
2780 
2781         State.set(VPDefs[J], StridedVec, Part);
2782       }
2783       ++J;
2784     }
2785     return;
2786   }
2787 
  // The sub vector type for the current instruction.
2789   assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2790   auto *SubVT = VectorType::get(ScalarTy, VF);
2791 
2792   // Vectorize the interleaved store group.
2793   for (unsigned Part = 0; Part < UF; Part++) {
2794     // Collect the stored vector from each member.
2795     SmallVector<Value *, 4> StoredVecs;
2796     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2799 
2800       Value *StoredVec = State.get(StoredValues[i], Part);
2801 
2802       if (Group->isReverse())
2803         StoredVec = reverseVector(StoredVec);
2804 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2808         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2809 
2810       StoredVecs.push_back(StoredVec);
2811     }
2812 
2813     // Concatenate all vectors into a wide vector.
2814     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2815 
2816     // Interleave the elements in the wide vector.
2817     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2818     Value *IVec = Builder.CreateShuffleVector(
2819         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2820         "interleaved.vec");
2821 
2822     Instruction *NewStoreInstr;
2823     if (BlockInMask) {
2824       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2825       Value *ShuffledMask = Builder.CreateShuffleVector(
2826           BlockInMaskPart,
2827           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2828           "interleaved.mask");
2829       NewStoreInstr = Builder.CreateMaskedStore(
2830           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
2833       NewStoreInstr =
2834           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2835 
2836     Group->addMetadata(NewStoreInstr);
2837   }
2838 }
2839 
2840 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2841     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2842     VPValue *StoredValue, VPValue *BlockInMask) {
2843   // Attempt to issue a wide load.
2844   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2845   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2846 
2847   assert((LI || SI) && "Invalid Load/Store instruction");
2848   assert((!SI || StoredValue) && "No stored value provided for widened store");
2849   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2850 
2851   LoopVectorizationCostModel::InstWidening Decision =
2852       Cost->getWideningDecision(Instr, VF);
2853   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2854           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2855           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2856          "CM decision is not to widen the memory instruction");
2857 
2858   Type *ScalarDataTy = getMemInstValueType(Instr);
2859 
2860   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2861   const Align Alignment = getLoadStoreAlignment(Instr);
2862 
2863   // Determine if the pointer operand of the access is either consecutive or
2864   // reverse consecutive.
2865   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2866   bool ConsecutiveStride =
2867       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2868   bool CreateGatherScatter =
2869       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2870 
2871   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2872   // gather/scatter. Otherwise Decision should have been to Scalarize.
2873   assert((ConsecutiveStride || CreateGatherScatter) &&
2874          "The instruction should be scalarized");
2875   (void)ConsecutiveStride;
2876 
2877   VectorParts BlockInMaskParts(UF);
2878   bool isMaskRequired = BlockInMask;
2879   if (isMaskRequired)
2880     for (unsigned Part = 0; Part < UF; ++Part)
2881       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2882 
2883   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2884     // Calculate the pointer for the specific unroll-part.
2885     GetElementPtrInst *PartPtr = nullptr;
2886 
2887     bool InBounds = false;
2888     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2889       InBounds = gep->isInBounds();
2890     if (Reverse) {
2891       // If the address is consecutive but reversed, then the
2892       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors VScale is 1, so
      // RunTimeVF = VF.getKnownMinValue().
2895       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2896       // NumElt = -Part * RunTimeVF
2897       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2898       // LastLane = 1 - RunTimeVF
2899       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
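      // E.g. with fixed VF = 4 and Part = 1: NumElt = -4, LastLane = -3, so
      // the part pointer is Ptr - 7 and the wide access covers the elements
      // Ptr[-7]..Ptr[-4] read by this part's (reversed) iterations.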
2900       PartPtr =
2901           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2902       PartPtr->setIsInBounds(InBounds);
2903       PartPtr = cast<GetElementPtrInst>(
2904           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2905       PartPtr->setIsInBounds(InBounds);
2906       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2907         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2908     } else {
2909       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2910       PartPtr = cast<GetElementPtrInst>(
2911           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2912       PartPtr->setIsInBounds(InBounds);
2913     }
2914 
2915     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2916     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2917   };
2918 
2919   // Handle Stores:
2920   if (SI) {
2921     setDebugLocFromInst(Builder, SI);
2922 
2923     for (unsigned Part = 0; Part < UF; ++Part) {
2924       Instruction *NewSI = nullptr;
2925       Value *StoredVal = State.get(StoredValue, Part);
2926       if (CreateGatherScatter) {
2927         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2928         Value *VectorGep = State.get(Addr, Part);
2929         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2930                                             MaskPart);
2931       } else {
2932         if (Reverse) {
2933           // If we store to reverse consecutive memory locations, then we need
2934           // to reverse the order of elements in the stored value.
2935           StoredVal = reverseVector(StoredVal);
2936           // We don't want to update the value in the map as it might be used in
2937           // another expression. So don't call resetVectorValue(StoredVal).
2938         }
2939         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2940         if (isMaskRequired)
2941           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2942                                             BlockInMaskParts[Part]);
2943         else
2944           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2945       }
2946       addMetadata(NewSI, SI);
2947     }
2948     return;
2949   }
2950 
2951   // Handle loads.
2952   assert(LI && "Must have a load instruction");
2953   setDebugLocFromInst(Builder, LI);
2954   for (unsigned Part = 0; Part < UF; ++Part) {
2955     Value *NewLI;
2956     if (CreateGatherScatter) {
2957       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2958       Value *VectorGep = State.get(Addr, Part);
2959       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2960                                          nullptr, "wide.masked.gather");
2961       addMetadata(NewLI, LI);
2962     } else {
2963       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2964       if (isMaskRequired)
2965         NewLI = Builder.CreateMaskedLoad(
2966             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2967             "wide.masked.load");
2968       else
2969         NewLI =
2970             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2971 
      // Add metadata to the load, but record the reversed value in State.
2973       addMetadata(NewLI, LI);
2974       if (Reverse)
2975         NewLI = reverseVector(NewLI);
2976     }
2977 
2978     State.set(Def, NewLI, Part);
2979   }
2980 }
2981 
2982 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
2983                                                VPUser &User,
2984                                                const VPIteration &Instance,
2985                                                bool IfPredicateInstr,
2986                                                VPTransformState &State) {
2987   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2988 
  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
2991   if (isa<NoAliasScopeDeclInst>(Instr))
2992     if (!Instance.isFirstIteration())
2993       return;
2994 
2995   setDebugLocFromInst(Builder, Instr);
2996 
  // Does this instruction return a value?
2998   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2999 
3000   Instruction *Cloned = Instr->clone();
3001   if (!IsVoidRetTy)
3002     Cloned->setName(Instr->getName() + ".cloned");
3003 
3004   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3005                                Builder.GetInsertPoint());
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
3008   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3009     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3010     auto InputInstance = Instance;
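    // Operands defined outside the loop, non-instruction operands, and
    // operands that are uniform after vectorization only get their first lane
    // generated; read lane 0 for those.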
3011     if (!Operand || !OrigLoop->contains(Operand) ||
3012         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3013       InputInstance.Lane = VPLane::getFirstLane();
3014     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3015     Cloned->setOperand(op, NewOp);
3016   }
3017   addNewMetadata(Cloned, Instr);
3018 
3019   // Place the cloned scalar in the new loop.
3020   Builder.Insert(Cloned);
3021 
3022   State.set(Def, Cloned, Instance);
3023 
  // If we just cloned a new assumption, add it to the assumption cache.
3025   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3026     AC->registerAssumption(II);
3027 
3028   // End if-block.
3029   if (IfPredicateInstr)
3030     PredicatedInstructions.push_back(Cloned);
3031 }
3032 
3033 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3034                                                       Value *End, Value *Step,
3035                                                       Instruction *DL) {
3036   BasicBlock *Header = L->getHeader();
3037   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header, as this will be a single-block loop.
3040   if (!Latch)
3041     Latch = Header;
3042 
3043   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3044   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3045   setDebugLocFromInst(Builder, OldInst);
3046   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3047 
3048   Builder.SetInsertPoint(Latch->getTerminator());
3049   setDebugLocFromInst(Builder, OldInst);
3050 
3051   // Create i+1 and fill the PHINode.
3052   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3053   Induction->addIncoming(Start, L->getLoopPreheader());
3054   Induction->addIncoming(Next, Latch);
3055   // Create the compare.
3056   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3057   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3058 
3059   // Now we have two terminators. Remove the old one from the block.
3060   Latch->getTerminator()->eraseFromParent();
3061 
3062   return Induction;
3063 }
3064 
3065 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3066   if (TripCount)
3067     return TripCount;
3068 
3069   assert(L && "Create Trip Count for null loop.");
3070   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3071   // Find the loop boundaries.
3072   ScalarEvolution *SE = PSE.getSE();
3073   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3074   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3075          "Invalid loop count");
3076 
3077   Type *IdxTy = Legal->getWidestInductionType();
3078   assert(IdxTy && "No type for induction");
3079 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count here is if the
  // induction variable was signed and as such will not overflow, in which
  // case truncation is legal.
3085   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3086       IdxTy->getPrimitiveSizeInBits())
3087     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3088   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3089 
3090   // Get the total trip count from the count by adding 1.
3091   const SCEV *ExitCount = SE->getAddExpr(
3092       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3093 
3094   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3095 
3096   // Expand the trip count and place the new instructions in the preheader.
3097   // Notice that the pre-header does not change, only the loop body.
3098   SCEVExpander Exp(*SE, DL, "induction");
3099 
  // TripCount holds the overall loop count (N).
3101   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3102                                 L->getLoopPreheader()->getTerminator());
3103 
3104   if (TripCount->getType()->isPointerTy())
3105     TripCount =
3106         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3107                                     L->getLoopPreheader()->getTerminator());
3108 
3109   return TripCount;
3110 }
3111 
3112 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3113   if (VectorTripCount)
3114     return VectorTripCount;
3115 
3116   Value *TC = getOrCreateTripCount(L);
3117   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3118 
3119   Type *Ty = TC->getType();
3120   // This is where we can make the step a runtime constant.
3121   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3122 
3123   // If the tail is to be folded by masking, round the number of iterations N
3124   // up to a multiple of Step instead of rounding down. This is done by first
3125   // adding Step-1 and then rounding down. Note that it's ok if this addition
3126   // overflows: the vector induction variable will eventually wrap to zero given
3127   // that it starts at zero and its Step is a power of two; the loop will then
3128   // exit, with the last early-exit vector comparison also producing all-true.
3129   if (Cost->foldTailByMasking()) {
3130     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3131            "VF*UF must be a power of 2 when folding tail by masking");
3132     assert(!VF.isScalable() &&
3133            "Tail folding not yet supported for scalable vectors");
3134     TC = Builder.CreateAdd(
3135         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3136   }
3137 
3138   // Now we need to generate the expression for the part of the loop that the
3139   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3140   // iterations are not required for correctness, or N - Step, otherwise. Step
3141   // is equal to the vectorization factor (number of SIMD elements) times the
3142   // unroll factor (number of SIMD instructions).
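  // For example, with VF = 4 and UF = 2 (Step = 8) and a trip count of 17
  // (and no tail folding), R is 1 and the vector trip count becomes 16,
  // leaving one scalar iteration for the remainder loop.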
3143   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3144 
3145   // There are two cases where we need to ensure (at least) the last iteration
3146   // runs in the scalar remainder loop. Thus, if the step evenly divides
3147   // the trip count, we set the remainder to be equal to the step. If the step
3148   // does not evenly divide the trip count, no adjustment is necessary since
3149   // there will already be scalar iterations. Note that the minimum iterations
3150   // check ensures that N >= Step. The cases are:
3151   // 1) If there is a non-reversed interleaved group that may speculatively
3152   //    access memory out-of-bounds.
3153   // 2) If any instruction may follow a conditionally taken exit. That is, if
3154   //    the loop contains multiple exiting blocks, or a single exiting block
3155   //    which is not the latch.
3156   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3157     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3158     R = Builder.CreateSelect(IsZero, Step, R);
3159   }
3160 
3161   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3162 
3163   return VectorTripCount;
3164 }
3165 
3166 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3167                                                    const DataLayout &DL) {
3168   // Verify that V is a vector type with same number of elements as DstVTy.
3169   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3170   unsigned VF = DstFVTy->getNumElements();
3171   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3173   Type *SrcElemTy = SrcVecTy->getElementType();
3174   Type *DstElemTy = DstFVTy->getElementType();
3175   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3176          "Vector elements must have same size");
3177 
3178   // Do a direct cast if element types are castable.
3179   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3180     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3181   }
3182   // V cannot be directly casted to desired vector type.
3183   // May happen when V is a floating point vector but DstVTy is a vector of
3184   // pointers or vice-versa. Handle this using a two-step bitcast using an
3185   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3186   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3187          "Only one type should be a pointer type");
3188   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3189          "Only one type should be a floating point type");
3190   Type *IntTy =
3191       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3192   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3193   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3194   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3195 }
3196 
3197 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3198                                                          BasicBlock *Bypass) {
3199   Value *Count = getOrCreateTripCount(L);
3200   // Reuse existing vector loop preheader for TC checks.
3201   // Note that new preheader block is generated for vector loop.
3202   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3203   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3204 
3205   // Generate code to check if the loop's trip count is less than VF * UF, or
3206   // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
3210   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3211                                           : ICmpInst::ICMP_ULT;
3212 
3213   // If tail is to be folded, vector loop takes care of all iterations.
3214   Value *CheckMinIters = Builder.getFalse();
3215   if (!Cost->foldTailByMasking()) {
3216     Value *Step =
3217         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3218     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3219   }
3220   // Create new preheader for vector loop.
3221   LoopVectorPreHeader =
3222       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3223                  "vector.ph");
3224 
3225   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3226                                DT->getNode(Bypass)->getIDom()) &&
3227          "TC check is expected to dominate Bypass");
3228 
3229   // Update dominator for Bypass & LoopExit.
3230   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3231   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3232 
3233   ReplaceInstWithInst(
3234       TCCheckBlock->getTerminator(),
3235       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3236   LoopBypassBlocks.push_back(TCCheckBlock);
3237 }
3238 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3241   BasicBlock *const SCEVCheckBlock =
3242       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3243   if (!SCEVCheckBlock)
3244     return nullptr;
3245 
3246   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3247            (OptForSizeBasedOnProfile &&
3248             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3249          "Cannot SCEV check stride or overflow when optimizing for size");
3250 
3252   // Update dominator only if this is first RT check.
3253   if (LoopBypassBlocks.empty()) {
3254     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3255     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3256   }
3257 
3258   LoopBypassBlocks.push_back(SCEVCheckBlock);
3259   AddedSafetyChecks = true;
3260   return SCEVCheckBlock;
3261 }
3262 
3263 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3264                                                       BasicBlock *Bypass) {
3265   // VPlan-native path does not do any analysis for runtime checks currently.
3266   if (EnableVPlanNativePath)
3267     return nullptr;
3268 
3269   BasicBlock *const MemCheckBlock =
3270       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3271 
  // Check if we generated code that checks at runtime if arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
3275   if (!MemCheckBlock)
3276     return nullptr;
3277 
3278   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3279     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3280            "Cannot emit memory checks when optimizing for size, unless forced "
3281            "to vectorize.");
3282     ORE->emit([&]() {
3283       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3284                                         L->getStartLoc(), L->getHeader())
3285              << "Code-size may be reduced by not forcing "
3286                 "vectorization, or by source-code modifications "
3287                 "eliminating the need for runtime checks "
3288                 "(e.g., adding 'restrict').";
3289     });
3290   }
3291 
3292   LoopBypassBlocks.push_back(MemCheckBlock);
3293 
3294   AddedSafetyChecks = true;
3295 
3296   // We currently don't use LoopVersioning for the actual loop cloning but we
3297   // still use it to add the noalias metadata.
3298   LVer = std::make_unique<LoopVersioning>(
3299       *Legal->getLAI(),
3300       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3301       DT, PSE.getSE());
3302   LVer->prepareNoAliasMetadata();
3303   return MemCheckBlock;
3304 }
3305 
3306 Value *InnerLoopVectorizer::emitTransformedIndex(
3307     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3308     const InductionDescriptor &ID) const {
3309 
3310   SCEVExpander Exp(*SE, DL, "induction");
3311   auto Step = ID.getStep();
3312   auto StartValue = ID.getStartValue();
3313   assert(Index->getType() == Step->getType() &&
3314          "Index type does not match StepValue type");
3315 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3322   auto CreateAdd = [&B](Value *X, Value *Y) {
3323     assert(X->getType() == Y->getType() && "Types don't match!");
3324     if (auto *CX = dyn_cast<ConstantInt>(X))
3325       if (CX->isZero())
3326         return Y;
3327     if (auto *CY = dyn_cast<ConstantInt>(Y))
3328       if (CY->isZero())
3329         return X;
3330     return B.CreateAdd(X, Y);
3331   };
3332 
3333   auto CreateMul = [&B](Value *X, Value *Y) {
3334     assert(X->getType() == Y->getType() && "Types don't match!");
3335     if (auto *CX = dyn_cast<ConstantInt>(X))
3336       if (CX->isOne())
3337         return Y;
3338     if (auto *CY = dyn_cast<ConstantInt>(Y))
3339       if (CY->isOne())
3340         return X;
3341     return B.CreateMul(X, Y);
3342   };
3343 
3344   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3345   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3346   // the DomTree is not kept up-to-date for additional blocks generated in the
3347   // vector loop. By using the header as insertion point, we guarantee that the
3348   // expanded instructions dominate all their uses.
3349   auto GetInsertPoint = [this, &B]() {
3350     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3351     if (InsertBB != LoopVectorBody &&
3352         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3353       return LoopVectorBody->getTerminator();
3354     return &*B.GetInsertPoint();
3355   };
3356 
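  // The transformed index is StartValue + Index * Step for integer
  // inductions, a GEP of StartValue by Index * Step elements for pointer
  // inductions, and StartValue fadd/fsub Index * Step for FP inductions.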
3357   switch (ID.getKind()) {
3358   case InductionDescriptor::IK_IntInduction: {
3359     assert(Index->getType() == StartValue->getType() &&
3360            "Index type does not match StartValue type");
3361     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3362       return B.CreateSub(StartValue, Index);
3363     auto *Offset = CreateMul(
3364         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3365     return CreateAdd(StartValue, Offset);
3366   }
3367   case InductionDescriptor::IK_PtrInduction: {
3368     assert(isa<SCEVConstant>(Step) &&
3369            "Expected constant step for pointer induction");
3370     return B.CreateGEP(
3371         StartValue->getType()->getPointerElementType(), StartValue,
3372         CreateMul(Index,
3373                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3374   }
3375   case InductionDescriptor::IK_FpInduction: {
3376     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3377     auto InductionBinOp = ID.getInductionBinOp();
3378     assert(InductionBinOp &&
3379            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3380             InductionBinOp->getOpcode() == Instruction::FSub) &&
3381            "Original bin op should be defined for FP induction");
3382 
3383     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3384     Value *MulExp = B.CreateFMul(StepValue, Index);
3385     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3386                          "induction");
3387   }
3388   case InductionDescriptor::IK_NoInduction:
3389     return nullptr;
3390   }
3391   llvm_unreachable("invalid enum");
3392 }
3393 
3394 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3395   LoopScalarBody = OrigLoop->getHeader();
3396   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3397   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3398   assert(LoopExitBlock && "Must have an exit block");
3399   assert(LoopVectorPreHeader && "Invalid loop structure");
3400 
3401   LoopMiddleBlock =
3402       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3403                  LI, nullptr, Twine(Prefix) + "middle.block");
3404   LoopScalarPreHeader =
3405       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3406                  nullptr, Twine(Prefix) + "scalar.ph");
3407 
3408   // Set up branch from middle block to the exit and scalar preheader blocks.
3409   // completeLoopSkeleton will update the condition to use an iteration check,
3410   // if required to decide whether to execute the remainder.
3411   BranchInst *BrInst =
3412       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3413   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3414   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3415   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3416 
  // We intentionally don't let SplitBlock update LoopInfo since LoopVectorBody
  // should belong to a different loop than LoopVectorPreHeader. LoopVectorBody
  // is explicitly added to the correct place a few lines later.
3420   LoopVectorBody =
3421       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3422                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3423 
3424   // Update dominator for loop exit.
3425   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3426 
3427   // Create and register the new vector loop.
3428   Loop *Lp = LI->AllocateLoop();
3429   Loop *ParentLoop = OrigLoop->getParentLoop();
3430 
3431   // Insert the new loop into the loop nest and register the new basic blocks
3432   // before calling any utilities such as SCEV that require valid LoopInfo.
3433   if (ParentLoop) {
3434     ParentLoop->addChildLoop(Lp);
3435   } else {
3436     LI->addTopLevelLoop(Lp);
3437   }
3438   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3439   return Lp;
3440 }
3441 
3442 void InnerLoopVectorizer::createInductionResumeValues(
3443     Loop *L, Value *VectorTripCount,
3444     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3445   assert(VectorTripCount && L && "Expected valid arguments");
3446   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3447           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3448          "Inconsistent information about additional bypass.");
3449   // We are going to resume the execution of the scalar loop.
3450   // Go over all of the induction variables that we found and fix the
3451   // PHIs that are left in the scalar version of the loop.
3452   // The starting values of PHI nodes depend on the counter of the last
3453   // iteration in the vectorized loop.
3454   // If we come from a bypass edge then we need to start from the original
3455   // start value.
3456   for (auto &InductionEntry : Legal->getInductionVars()) {
3457     PHINode *OrigPhi = InductionEntry.first;
3458     InductionDescriptor II = InductionEntry.second;
3459 
    // Create phi nodes to merge from the backedge-taken check block.
3461     PHINode *BCResumeVal =
3462         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3463                         LoopScalarPreHeader->getTerminator());
3464     // Copy original phi DL over to the new one.
3465     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3466     Value *&EndValue = IVEndValues[OrigPhi];
3467     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3468     if (OrigPhi == OldInduction) {
3469       // We know what the end value is.
3470       EndValue = VectorTripCount;
3471     } else {
3472       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3473 
3474       // Fast-math-flags propagate from the original induction instruction.
3475       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3476         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3477 
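      // Compute the value this induction reaches after VectorTripCount
      // iterations; it becomes the resume value when entering the scalar loop
      // from the vector loop.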
3478       Type *StepType = II.getStep()->getType();
3479       Instruction::CastOps CastOp =
3480           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3481       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3482       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3483       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3484       EndValue->setName("ind.end");
3485 
3486       // Compute the end value for the additional bypass (if applicable).
3487       if (AdditionalBypass.first) {
3488         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3489         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3490                                          StepType, true);
3491         CRD =
3492             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3493         EndValueFromAdditionalBypass =
3494             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3495         EndValueFromAdditionalBypass->setName("ind.end");
3496       }
3497     }
3498     // The new PHI merges the original incoming value, in case of a bypass,
3499     // or the value at the end of the vectorized loop.
3500     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3501 
3502     // Fix the scalar body counter (PHI node).
3503     // The old induction's phi node in the scalar body needs the truncated
3504     // value.
3505     for (BasicBlock *BB : LoopBypassBlocks)
3506       BCResumeVal->addIncoming(II.getStartValue(), BB);
3507 
3508     if (AdditionalBypass.first)
3509       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3510                                             EndValueFromAdditionalBypass);
3511 
3512     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3513   }
3514 }
3515 
3516 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3517                                                       MDNode *OrigLoopID) {
3518   assert(L && "Expected valid loop.");
3519 
3520   // The trip counts should be cached by now.
3521   Value *Count = getOrCreateTripCount(L);
3522   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3523 
3524   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3525 
3526   // Add a check in the middle block to see if we have completed
3527   // all of the iterations in the first vector loop.
3528   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3529   // If tail is to be folded, we know we don't need to run the remainder.
3530   if (!Cost->foldTailByMasking()) {
3531     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3532                                         Count, VectorTripCount, "cmp.n",
3533                                         LoopMiddleBlock->getTerminator());
3534 
3535     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3536     // of the corresponding compare because they may have ended up with
3537     // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has got a line number inside the loop.
3539     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3540     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3541   }
3542 
3543   // Get ready to start creating new instructions into the vectorized body.
3544   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3545          "Inconsistent vector loop preheader");
3546   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3547 
3548   Optional<MDNode *> VectorizedLoopID =
3549       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3550                                       LLVMLoopVectorizeFollowupVectorized});
3551   if (VectorizedLoopID.hasValue()) {
3552     L->setLoopID(VectorizedLoopID.getValue());
3553 
3554     // Do not setAlreadyVectorized if loop attributes have been defined
3555     // explicitly.
3556     return LoopVectorPreHeader;
3557   }
3558 
3559   // Keep all loop hints from the original loop on the vector loop (we'll
3560   // replace the vectorizer-specific hints below).
3561   if (MDNode *LID = OrigLoop->getLoopID())
3562     L->setLoopID(LID);
3563 
3564   LoopVectorizeHints Hints(L, true, *ORE);
3565   Hints.setAlreadyVectorized();
3566 
3567 #ifdef EXPENSIVE_CHECKS
3568   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3569   LI->verify(*DT);
3570 #endif
3571 
3572   return LoopVectorPreHeader;
3573 }
3574 
3575 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3576   /*
3577    In this function we generate a new loop. The new loop will contain
3578    the vectorized instructions while the old loop will continue to run the
3579    scalar remainder.
3580 
3581        [ ] <-- loop iteration number check.
3582     /   |
3583    /    v
3584   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3585   |  /  |
3586   | /   v
3587   ||   [ ]     <-- vector pre header.
3588   |/    |
3589   |     v
3590   |    [  ] \
3591   |    [  ]_|   <-- vector loop.
3592   |     |
3593   |     v
3594   |   -[ ]   <--- middle-block.
3595   |  /  |
3596   | /   v
3597   -|- >[ ]     <--- new preheader.
3598    |    |
3599    |    v
3600    |   [ ] \
3601    |   [ ]_|   <-- old scalar loop to handle remainder.
3602     \   |
3603      \  v
3604       >[ ]     <-- exit block.
3605    ...
3606    */
3607 
3608   // Get the metadata of the original loop before it gets modified.
3609   MDNode *OrigLoopID = OrigLoop->getLoopID();
3610 
3611   // Create an empty vector loop, and prepare basic blocks for the runtime
3612   // checks.
3613   Loop *Lp = createVectorLoopSkeleton("");
3614 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
3620   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3621 
3622   // Generate the code to check any assumptions that we've made for SCEV
3623   // expressions.
3624   emitSCEVChecks(Lp, LoopScalarPreHeader);
3625 
  // Generate the code that checks at runtime whether the arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
3629   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3630 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterator loops, which often have multiple
  // pointer induction variables. In the code below we also support cases
  // where there is no single induction variable.
3635   //
3636   // We try to obtain an induction variable from the original loop as hard
3637   // as possible. However if we don't find one that:
3638   //   - is an integer
3639   //   - counts from zero, stepping by one
3640   //   - is the size of the widest induction variable type
3641   // then we create a new one.
3642   OldInduction = Legal->getPrimaryInduction();
3643   Type *IdxTy = Legal->getWidestInductionType();
3644   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3645   // The loop step is equal to the vectorization factor (num of SIMD elements)
3646   // times the unroll factor (num of SIMD instructions).
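  // E.g. for VF = 4 and UF = 2 the induction is advanced by 8 per vector
  // iteration (or by 8 * vscale when VF is scalable).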
3647   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3648   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3649   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3650   Induction =
3651       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3652                               getDebugLocFromInstOrOperands(OldInduction));
3653 
3654   // Emit phis for the new starting index of the scalar loop.
3655   createInductionResumeValues(Lp, CountRoundDown);
3656 
3657   return completeLoopSkeleton(Lp, OrigLoopID);
3658 }
3659 
3660 // Fix up external users of the induction variable. At this point, we are
3661 // in LCSSA form, with all external PHIs that use the IV having one input value,
3662 // coming from the remainder loop. We need those PHIs to also have a correct
3663 // value for the IV when arriving directly from the middle block.
3664 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3665                                        const InductionDescriptor &II,
3666                                        Value *CountRoundDown, Value *EndValue,
3667                                        BasicBlock *MiddleBlock) {
3668   // There are two kinds of external IV usages - those that use the value
3669   // computed in the last iteration (the PHI) and those that use the penultimate
3670   // value (the value that feeds into the phi from the loop latch).
3671   // We allow both, but they, obviously, have different values.
3672 
3673   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3674 
3675   DenseMap<Value *, Value *> MissingVals;
3676 
3677   // An external user of the last iteration's value should see the value that
3678   // the remainder loop uses to initialize its own IV.
3679   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3680   for (User *U : PostInc->users()) {
3681     Instruction *UI = cast<Instruction>(U);
3682     if (!OrigLoop->contains(UI)) {
3683       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3684       MissingVals[UI] = EndValue;
3685     }
3686   }
3687 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
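  // For example, for an integer IV with start %start and step 4, the escape
  // value is roughly %start + 4 * (CRD - 1), materialized in the middle block
  // and used as that block's incoming value in the exit-block LCSSA phi.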
3691   for (User *U : OrigPhi->users()) {
3692     auto *UI = cast<Instruction>(U);
3693     if (!OrigLoop->contains(UI)) {
3694       const DataLayout &DL =
3695           OrigLoop->getHeader()->getModule()->getDataLayout();
3696       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3697 
3698       IRBuilder<> B(MiddleBlock->getTerminator());
3699 
3700       // Fast-math-flags propagate from the original induction instruction.
3701       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3702         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3703 
3704       Value *CountMinusOne = B.CreateSub(
3705           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3706       Value *CMO =
3707           !II.getStep()->getType()->isIntegerTy()
3708               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3709                              II.getStep()->getType())
3710               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3711       CMO->setName("cast.cmo");
3712       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3713       Escape->setName("ind.escape");
3714       MissingVals[UI] = Escape;
3715     }
3716   }
3717 
3718   for (auto &I : MissingVals) {
3719     PHINode *PHI = cast<PHINode>(I.first);
3720     // One corner case we have to handle is two IVs "chasing" each-other,
3721     // that is %IV2 = phi [...], [ %IV1, %latch ]
3722     // In this case, if IV1 has an external use, we need to avoid adding both
3723     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3724     // don't already have an incoming value for the middle block.
3725     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3726       PHI->addIncoming(I.second, MiddleBlock);
3727   }
3728 }
3729 
3730 namespace {
3731 
3732 struct CSEDenseMapInfo {
3733   static bool canHandle(const Instruction *I) {
3734     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3735            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3736   }
3737 
3738   static inline Instruction *getEmptyKey() {
3739     return DenseMapInfo<Instruction *>::getEmptyKey();
3740   }
3741 
3742   static inline Instruction *getTombstoneKey() {
3743     return DenseMapInfo<Instruction *>::getTombstoneKey();
3744   }
3745 
3746   static unsigned getHashValue(const Instruction *I) {
3747     assert(canHandle(I) && "Unknown instruction!");
3748     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3749                                                            I->value_op_end()));
3750   }
3751 
3752   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3753     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3754         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3755       return LHS == RHS;
3756     return LHS->isIdenticalTo(RHS);
3757   }
3758 };
3759 
3760 } // end anonymous namespace
3761 
/// Perform CSE of induction variable instructions.
3763 static void cse(BasicBlock *BB) {
3764   // Perform simple cse.
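  // E.g. two structurally identical extractelement instructions produced
  // while unpacking an induction vector hash to the same key, so the second
  // one is replaced by the first and erased.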
3765   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3766   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3767     Instruction *In = &*I++;
3768 
3769     if (!CSEDenseMapInfo::canHandle(In))
3770       continue;
3771 
3772     // Check if we can replace this instruction with any of the
3773     // visited instructions.
3774     if (Instruction *V = CSEMap.lookup(In)) {
3775       In->replaceAllUsesWith(V);
3776       In->eraseFromParent();
3777       continue;
3778     }
3779 
3780     CSEMap[In] = In;
3781   }
3782 }
3783 
3784 InstructionCost
3785 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3786                                               bool &NeedToScalarize) const {
3787   Function *F = CI->getCalledFunction();
3788   Type *ScalarRetTy = CI->getType();
3789   SmallVector<Type *, 4> Tys, ScalarTys;
3790   for (auto &ArgOp : CI->arg_operands())
3791     ScalarTys.push_back(ArgOp->getType());
3792 
3793   // Estimate cost of scalarized vector call. The source operands are assumed
3794   // to be vectors, so we need to extract individual elements from there,
3795   // execute VF scalar calls, and then gather the result into the vector return
3796   // value.
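  // Roughly, for VF = 4 the scalarized estimate is 4 * Cost(scalar call) plus
  // the cost of extracting each argument lane and inserting each result lane.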
3797   InstructionCost ScalarCallCost =
3798       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3799   if (VF.isScalar())
3800     return ScalarCallCost;
3801 
3802   // Compute corresponding vector type for return value and arguments.
3803   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3804   for (Type *ScalarTy : ScalarTys)
3805     Tys.push_back(ToVectorTy(ScalarTy, VF));
3806 
3807   // Compute costs of unpacking argument values for the scalar calls and
3808   // packing the return values to a vector.
3809   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3810 
3811   InstructionCost Cost =
3812       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3813 
3814   // If we can't emit a vector call for this function, then the currently found
3815   // cost is the cost we need to return.
3816   NeedToScalarize = true;
3817   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3818   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3819 
3820   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3821     return Cost;
3822 
3823   // If the corresponding vector cost is cheaper, return its cost.
3824   InstructionCost VectorCallCost =
3825       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3826   if (VectorCallCost < Cost) {
3827     NeedToScalarize = false;
3828     Cost = VectorCallCost;
3829   }
3830   return Cost;
3831 }
3832 
3833 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3834   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3835     return Elt;
3836   return VectorType::get(Elt, VF);
3837 }
3838 
3839 InstructionCost
3840 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3841                                                    ElementCount VF) const {
3842   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3843   assert(ID && "Expected intrinsic call!");
3844   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3845   FastMathFlags FMF;
3846   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3847     FMF = FPMO->getFastMathFlags();
3848 
3849   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3850   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3851   SmallVector<Type *> ParamTys;
3852   std::transform(FTy->param_begin(), FTy->param_end(),
3853                  std::back_inserter(ParamTys),
3854                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3855 
3856   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3857                                     dyn_cast<IntrinsicInst>(CI));
3858   return TTI.getIntrinsicInstrCost(CostAttrs,
3859                                    TargetTransformInfo::TCK_RecipThroughput);
3860 }
3861 
3862 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3863   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3864   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3865   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3866 }
3867 
3868 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3869   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3870   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3871   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3872 }
3873 
3874 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3875   // For every instruction `I` in MinBWs, truncate the operands, create a
3876   // truncated version of `I` and reextend its result. InstCombine runs
3877   // later and will remove any ext/trunc pairs.
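  // For example, if MinBWs records that an i32 add only needs 8 bits, the
  // vectorized
  //   %a = add <4 x i32> %x, %y
  // is rewritten (roughly) as
  //   %t0 = trunc <4 x i32> %x to <4 x i8>
  //   %t1 = trunc <4 x i32> %y to <4 x i8>
  //   %a8 = add <4 x i8> %t0, %t1
  //   %a  = zext <4 x i8> %a8 to <4 x i32>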
3878   SmallPtrSet<Value *, 4> Erased;
3879   for (const auto &KV : Cost->getMinimalBitwidths()) {
3880     // If the value wasn't vectorized, we must maintain the original scalar
3881     // type. The absence of the value from State indicates that it
3882     // wasn't vectorized.
3883     VPValue *Def = State.Plan->getVPValue(KV.first);
3884     if (!State.hasAnyVectorValue(Def))
3885       continue;
3886     for (unsigned Part = 0; Part < UF; ++Part) {
3887       Value *I = State.get(Def, Part);
3888       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3889         continue;
3890       Type *OriginalTy = I->getType();
3891       Type *ScalarTruncatedTy =
3892           IntegerType::get(OriginalTy->getContext(), KV.second);
3893       auto *TruncatedTy = FixedVectorType::get(
3894           ScalarTruncatedTy,
3895           cast<FixedVectorType>(OriginalTy)->getNumElements());
3896       if (TruncatedTy == OriginalTy)
3897         continue;
3898 
3899       IRBuilder<> B(cast<Instruction>(I));
3900       auto ShrinkOperand = [&](Value *V) -> Value * {
3901         if (auto *ZI = dyn_cast<ZExtInst>(V))
3902           if (ZI->getSrcTy() == TruncatedTy)
3903             return ZI->getOperand(0);
3904         return B.CreateZExtOrTrunc(V, TruncatedTy);
3905       };
3906 
3907       // The actual instruction modification depends on the instruction type,
3908       // unfortunately.
3909       Value *NewI = nullptr;
3910       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3911         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3912                              ShrinkOperand(BO->getOperand(1)));
3913 
3914         // Any wrapping introduced by shrinking this operation shouldn't be
3915         // considered undefined behavior. So, we can't unconditionally copy
3916         // arithmetic wrapping flags to NewI.
3917         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3918       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3919         NewI =
3920             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3921                          ShrinkOperand(CI->getOperand(1)));
3922       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3923         NewI = B.CreateSelect(SI->getCondition(),
3924                               ShrinkOperand(SI->getTrueValue()),
3925                               ShrinkOperand(SI->getFalseValue()));
3926       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3927         switch (CI->getOpcode()) {
3928         default:
3929           llvm_unreachable("Unhandled cast!");
3930         case Instruction::Trunc:
3931           NewI = ShrinkOperand(CI->getOperand(0));
3932           break;
3933         case Instruction::SExt:
3934           NewI = B.CreateSExtOrTrunc(
3935               CI->getOperand(0),
3936               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3937           break;
3938         case Instruction::ZExt:
3939           NewI = B.CreateZExtOrTrunc(
3940               CI->getOperand(0),
3941               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3942           break;
3943         }
3944       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3945         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3946                              ->getNumElements();
3947         auto *O0 = B.CreateZExtOrTrunc(
3948             SI->getOperand(0),
3949             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3950         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3951                              ->getNumElements();
3952         auto *O1 = B.CreateZExtOrTrunc(
3953             SI->getOperand(1),
3954             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3955 
3956         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3957       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3958         // Don't do anything with the operands, just extend the result.
3959         continue;
3960       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3961         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3962                             ->getNumElements();
3963         auto *O0 = B.CreateZExtOrTrunc(
3964             IE->getOperand(0),
3965             FixedVectorType::get(ScalarTruncatedTy, Elements));
3966         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3967         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3968       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3969         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3970                             ->getNumElements();
3971         auto *O0 = B.CreateZExtOrTrunc(
3972             EE->getOperand(0),
3973             FixedVectorType::get(ScalarTruncatedTy, Elements));
3974         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3975       } else {
3976         // If we don't know what to do, be conservative and don't do anything.
3977         continue;
3978       }
3979 
3980       // Lastly, extend the result.
3981       NewI->takeName(cast<Instruction>(I));
3982       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3983       I->replaceAllUsesWith(Res);
3984       cast<Instruction>(I)->eraseFromParent();
3985       Erased.insert(I);
3986       State.reset(Def, Res, Part);
3987     }
3988   }
3989 
  // We'll have created a number of ZExts that are now dead. Clean them up.
3991   for (const auto &KV : Cost->getMinimalBitwidths()) {
3992     // If the value wasn't vectorized, we must maintain the original scalar
3993     // type. The absence of the value from State indicates that it
3994     // wasn't vectorized.
3995     VPValue *Def = State.Plan->getVPValue(KV.first);
3996     if (!State.hasAnyVectorValue(Def))
3997       continue;
3998     for (unsigned Part = 0; Part < UF; ++Part) {
3999       Value *I = State.get(Def, Part);
4000       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4001       if (Inst && Inst->use_empty()) {
4002         Value *NewI = Inst->getOperand(0);
4003         Inst->eraseFromParent();
4004         State.reset(Def, NewI, Part);
4005       }
4006     }
4007   }
4008 }
4009 
4010 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4011   // Insert truncates and extends for any truncated instructions as hints to
4012   // InstCombine.
4013   if (VF.isVector())
4014     truncateToMinimalBitwidths(State);
4015 
4016   // Fix widened non-induction PHIs by setting up the PHI operands.
4017   if (OrigPHIsToFix.size()) {
4018     assert(EnableVPlanNativePath &&
4019            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4020     fixNonInductionPHIs(State);
4021   }
4022 
4023   // At this point every instruction in the original loop is widened to a
4024   // vector form. Now we need to fix the recurrences in the loop. These PHI
4025   // nodes are currently empty because we did not want to introduce cycles.
4026   // This is the second stage of vectorizing recurrences.
4027   fixCrossIterationPHIs(State);
4028 
4029   // Forget the original basic block.
4030   PSE.getSE()->forgetLoop(OrigLoop);
4031 
4032   // Fix-up external users of the induction variables.
4033   for (auto &Entry : Legal->getInductionVars())
4034     fixupIVUsers(Entry.first, Entry.second,
4035                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4036                  IVEndValues[Entry.first], LoopMiddleBlock);
4037 
4038   fixLCSSAPHIs(State);
4039   for (Instruction *PI : PredicatedInstructions)
4040     sinkScalarOperands(&*PI);
4041 
4042   // Remove redundant induction instructions.
4043   cse(LoopVectorBody);
4044 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with slightly less accurate weights, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
4058   setProfileInfoAfterUnrolling(
4059       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4060       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4061 }
4062 
4063 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4064   // In order to support recurrences we need to be able to vectorize Phi nodes.
4065   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4066   // stage #2: We now need to fix the recurrences by adding incoming edges to
4067   // the currently empty PHI nodes. At this point every instruction in the
4068   // original loop is widened to a vector form so we can use them to construct
4069   // the incoming edges.
4070   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4071     // Handle first-order recurrences and reductions that need to be fixed.
4072     if (Legal->isFirstOrderRecurrence(&Phi))
4073       fixFirstOrderRecurrence(&Phi, State);
4074     else if (Legal->isReductionVariable(&Phi))
4075       fixReduction(&Phi, State);
4076   }
4077 }
4078 
4079 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4080                                                   VPTransformState &State) {
4081   // This is the second phase of vectorizing first-order recurrences. An
4082   // overview of the transformation is described below. Suppose we have the
4083   // following loop.
4084   //
4085   //   for (int i = 0; i < n; ++i)
4086   //     b[i] = a[i] - a[i - 1];
4087   //
4088   // There is a first-order recurrence on "a". For this loop, the shorthand
4089   // scalar IR looks like:
4090   //
4091   //   scalar.ph:
4092   //     s_init = a[-1]
4093   //     br scalar.body
4094   //
4095   //   scalar.body:
4096   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4097   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4098   //     s2 = a[i]
4099   //     b[i] = s2 - s1
4100   //     br cond, scalar.body, ...
4101   //
  // In this example, s1 is a recurrence because its value depends on the
4103   // previous iteration. In the first phase of vectorization, we created a
4104   // temporary value for s1. We now complete the vectorization and produce the
4105   // shorthand vector IR shown below (for VF = 4, UF = 1).
4106   //
4107   //   vector.ph:
4108   //     v_init = vector(..., ..., ..., a[-1])
4109   //     br vector.body
4110   //
4111   //   vector.body
4112   //     i = phi [0, vector.ph], [i+4, vector.body]
4113   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4114   //     v2 = a[i, i+1, i+2, i+3];
4115   //     v3 = vector(v1(3), v2(0, 1, 2))
4116   //     b[i, i+1, i+2, i+3] = v2 - v3
4117   //     br cond, vector.body, middle.block
4118   //
4119   //   middle.block:
4120   //     x = v2(3)
4121   //     br scalar.ph
4122   //
4123   //   scalar.ph:
4124   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4125   //     br scalar.body
4126   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
4129 
4130   // Get the original loop preheader and single loop latch.
4131   auto *Preheader = OrigLoop->getLoopPreheader();
4132   auto *Latch = OrigLoop->getLoopLatch();
4133 
4134   // Get the initial and previous values of the scalar recurrence.
4135   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4136   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4137 
4138   // Create a vector from the initial value.
4139   auto *VectorInit = ScalarInit;
4140   if (VF.isVector()) {
4141     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4142     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4143     VectorInit = Builder.CreateInsertElement(
4144         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4145         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
4146   }
4147 
4148   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4149   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4150   // We constructed a temporary phi node in the first phase of vectorization.
4151   // This phi node will eventually be deleted.
4152   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4153 
4154   // Create a phi node for the new recurrence. The current value will either be
4155   // the initial value inserted into a vector or loop-varying vector value.
4156   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4157   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4158 
4159   // Get the vectorized previous value of the last part UF - 1. It appears last
4160   // among all unrolled iterations, due to the order of their construction.
4161   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4162 
4163   // Find and set the insertion point after the previous value if it is an
4164   // instruction.
4165   BasicBlock::iterator InsertPt;
4166   // Note that the previous value may have been constant-folded so it is not
4167   // guaranteed to be an instruction in the vector loop.
4168   // FIXME: Loop invariant values do not form recurrences. We should deal with
4169   //        them earlier.
4170   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4171     InsertPt = LoopVectorBody->getFirstInsertionPt();
4172   else {
4173     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4174     if (isa<PHINode>(PreviousLastPart))
4175       // If the previous value is a phi node, we should insert after all the phi
4176       // nodes in the block containing the PHI to avoid breaking basic block
      // verification. Note that the basic block may be different from
4178       // LoopVectorBody, in case we predicate the loop.
4179       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4180     else
4181       InsertPt = ++PreviousInst->getIterator();
4182   }
4183   Builder.SetInsertPoint(&*InsertPt);
4184 
4185   // We will construct a vector for the recurrence by combining the values for
4186   // the current and previous iterations. This is the required shuffle mask.
4187   assert(!VF.isScalable());
4188   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
4189   ShuffleMask[0] = VF.getKnownMinValue() - 1;
4190   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
4191     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
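  // E.g. for VF = 4 the mask is <3, 4, 5, 6>: lane 3 of the incoming vector
  // (the recurrence value from the previous iteration) followed by lanes 0-2
  // of the current part.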
4192 
4193   // The vector from which to take the initial value for the current iteration
4194   // (actual or unrolled). Initially, this is the vector phi node.
4195   Value *Incoming = VecPhi;
4196 
4197   // Shuffle the current and previous vector and update the vector parts.
4198   for (unsigned Part = 0; Part < UF; ++Part) {
4199     Value *PreviousPart = State.get(PreviousDef, Part);
4200     Value *PhiPart = State.get(PhiDef, Part);
4201     auto *Shuffle =
4202         VF.isVector()
4203             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4204             : Incoming;
4205     PhiPart->replaceAllUsesWith(Shuffle);
4206     cast<Instruction>(PhiPart)->eraseFromParent();
4207     State.reset(PhiDef, Shuffle, Part);
4208     Incoming = PreviousPart;
4209   }
4210 
4211   // Fix the latch value of the new recurrence in the vector loop.
4212   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4213 
4214   // Extract the last vector element in the middle block. This will be the
4215   // initial value for the recurrence when jumping to the scalar loop.
4216   auto *ExtractForScalar = Incoming;
4217   if (VF.isVector()) {
4218     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4219     ExtractForScalar = Builder.CreateExtractElement(
4220         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4221         "vector.recur.extract");
4222   }
4223   // Extract the second last element in the middle block if the
4224   // Phi is used outside the loop. We need to extract the phi itself
4225   // and not the last element (the phi update in the current iteration). This
4226   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4227   // when the scalar loop is not run at all.
4228   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4229   if (VF.isVector())
4230     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4231         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4232         "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`, i.e. part UF - 2. This is analogous to the vectorized case
  // above: extracting the second-to-last element when VF > 1.
4237   else if (UF > 1)
4238     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4239 
4240   // Fix the initial value of the original recurrence in the scalar loop.
4241   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4242   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4243   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4244     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4245     Start->addIncoming(Incoming, BB);
4246   }
4247 
4248   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4249   Phi->setName("scalar.recur");
4250 
4251   // Finally, fix users of the recurrence outside the loop. The users will need
4252   // either the last value of the scalar recurrence or the last value of the
4253   // vector recurrence we extracted in the middle block. Since the loop is in
4254   // LCSSA form, we just need to find all the phi nodes for the original scalar
4255   // recurrence in the exit block, and then add an edge for the middle block.
4256   // Note that LCSSA does not imply single entry when the original scalar loop
4257   // had multiple exiting edges (as we always run the last iteration in the
4258   // scalar epilogue); in that case, the exiting path through middle will be
4259   // dynamically dead and the value picked for the phi doesn't matter.
4260   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4261     if (any_of(LCSSAPhi.incoming_values(),
4262                [Phi](Value *V) { return V == Phi; }))
4263       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4264 }
4265 
4266 static bool useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4267   return EnableStrictReductions && RdxDesc.isOrdered();
4268 }
4269 
4270 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
  // Get its reduction variable descriptor.
4272   assert(Legal->isReductionVariable(Phi) &&
4273          "Unable to find the reduction variable");
4274   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4275 
4276   RecurKind RK = RdxDesc.getRecurrenceKind();
4277   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4278   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4279   setDebugLocFromInst(Builder, ReductionStartValue);
4280   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4281 
4282   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4283   // This is the vector-clone of the value that leaves the loop.
4284   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4285 
4286   // Wrap flags are in general invalid after vectorization, clear them.
4287   clearReductionWrapFlags(RdxDesc, State);
4288 
4289   // Fix the vector-loop phi.
4290 
4291   // Reductions do not have to start at zero. They can start with
4292   // any loop invariant values.
4293   BasicBlock *Latch = OrigLoop->getLoopLatch();
4294   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4295 
4296   for (unsigned Part = 0; Part < UF; ++Part) {
4297     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4298     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4299     if (IsInLoopReductionPhi && useOrderedReductions(RdxDesc) &&
4300         State.VF.isVector())
4301       Val = State.get(State.Plan->getVPValue(LoopVal), UF - 1);
4302     cast<PHINode>(VecRdxPhi)
4303       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4304   }
4305 
4306   // Before each round, move the insertion point right between
4307   // the PHIs and the values we are going to write.
4308   // This allows us to write both PHINodes and the extractelement
4309   // instructions.
4310   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4311 
4312   setDebugLocFromInst(Builder, LoopExitInst);
4313 
4314   Type *PhiTy = Phi->getType();
4315   // If tail is folded by masking, the vector value to leave the loop should be
4316   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4317   // instead of the former. For an inloop reduction the reduction will already
4318   // be predicated, and does not need to be handled here.
4319   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4320     for (unsigned Part = 0; Part < UF; ++Part) {
4321       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4322       Value *Sel = nullptr;
4323       for (User *U : VecLoopExitInst->users()) {
4324         if (isa<SelectInst>(U)) {
4325           assert(!Sel && "Reduction exit feeding two selects");
4326           Sel = U;
4327         } else
4328           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4329       }
4330       assert(Sel && "Reduction exit feeds no select");
4331       State.reset(LoopExitInstDef, Sel, Part);
4332 
4333       // If the target can create a predicated operator for the reduction at no
4334       // extra cost in the loop (for example a predicated vadd), it can be
4335       // cheaper for the select to remain in the loop than be sunk out of it,
4336       // and so use the select value for the phi instead of the old
4337       // LoopExitValue.
4338       if (PreferPredicatedReductionSelect ||
4339           TTI->preferPredicatedReductionSelect(
4340               RdxDesc.getOpcode(), PhiTy,
4341               TargetTransformInfo::ReductionFlags())) {
4342         auto *VecRdxPhi =
4343             cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part));
4344         VecRdxPhi->setIncomingValueForBlock(
4345             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4346       }
4347     }
4348   }
4349 
4350   // If the vector reduction can be performed in a smaller type, we truncate
4351   // then extend the loop exit value to enable InstCombine to evaluate the
4352   // entire expression in the smaller type.
4353   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4354     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4355     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4356     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4357     Builder.SetInsertPoint(
4358         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4359     VectorParts RdxParts(UF);
4360     for (unsigned Part = 0; Part < UF; ++Part) {
4361       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4362       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4363       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4364                                         : Builder.CreateZExt(Trunc, VecTy);
4365       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4366            UI != RdxParts[Part]->user_end();)
4367         if (*UI != Trunc) {
4368           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4369           RdxParts[Part] = Extnd;
4370         } else {
4371           ++UI;
4372         }
4373     }
4374     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4375     for (unsigned Part = 0; Part < UF; ++Part) {
4376       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4377       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4378     }
4379   }
4380 
4381   // Reduce all of the unrolled parts into a single vector.
4382   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4383   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4384 
4385   // The middle block terminator has already been assigned a DebugLoc here (the
4386   // OrigLoop's single latch terminator). We want the whole middle block to
4387   // appear to execute on this line because: (a) it is all compiler generated,
4388   // (b) these instructions are always executed after evaluating the latch
4389   // conditional branch, and (c) other passes may add new predecessors which
4390   // terminate on this line. This is the easiest way to ensure we don't
4391   // accidentally cause an extra step back into the loop while debugging.
4392   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4393   if (IsInLoopReductionPhi && useOrderedReductions(RdxDesc))
4394     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4395   else {
4396     // Floating-point operations should have some FMF to enable the reduction.
4397     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4398     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
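    // E.g. for an add reduction with UF = 2, this folds part 1 into part 0
    // with a single vector add named "bin.rdx"; the final horizontal
    // reduction to a scalar, when needed, happens below via
    // createTargetReduction.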
4399     for (unsigned Part = 1; Part < UF; ++Part) {
4400       Value *RdxPart = State.get(LoopExitInstDef, Part);
4401       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4402         ReducedPartRdx = Builder.CreateBinOp(
4403             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4404       } else {
4405         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4406       }
4407     }
4408   }
4409 
4410   // Create the reduction after the loop. Note that inloop reductions create the
4411   // target reduction in the loop using a Reduction recipe.
4412   if (VF.isVector() && !IsInLoopReductionPhi) {
4413     ReducedPartRdx =
4414         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4415     // If the reduction can be performed in a smaller type, we need to extend
4416     // the reduction to the wider type before we branch to the original loop.
4417     if (PhiTy != RdxDesc.getRecurrenceType())
4418       ReducedPartRdx = RdxDesc.isSigned()
4419                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4420                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4421   }
4422 
4423   // Create a phi node that merges control-flow from the backedge-taken check
4424   // block and the middle block.
4425   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4426                                         LoopScalarPreHeader->getTerminator());
4427   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4428     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4429   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4430 
4431   // Now, we need to fix the users of the reduction variable
4432   // inside and outside of the scalar remainder loop.
4433 
  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4437   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4438     if (any_of(LCSSAPhi.incoming_values(),
4439                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4440       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4441 
4442   // Fix the scalar loop reduction variable with the incoming reduction sum
4443   // from the vector body and from the backedge value.
4444   int IncomingEdgeBlockIdx =
4445     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4446   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4447   // Pick the other block.
4448   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4449   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4450   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4451 }
4452 
4453 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
4454                                                   VPTransformState &State) {
4455   RecurKind RK = RdxDesc.getRecurrenceKind();
4456   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4457     return;
4458 
4459   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4460   assert(LoopExitInstr && "null loop exit instruction");
4461   SmallVector<Instruction *, 8> Worklist;
4462   SmallPtrSet<Instruction *, 8> Visited;
4463   Worklist.push_back(LoopExitInstr);
4464   Visited.insert(LoopExitInstr);
4465 
4466   while (!Worklist.empty()) {
4467     Instruction *Cur = Worklist.pop_back_val();
4468     if (isa<OverflowingBinaryOperator>(Cur))
4469       for (unsigned Part = 0; Part < UF; ++Part) {
4470         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4471         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4472       }
4473 
4474     for (User *U : Cur->users()) {
4475       Instruction *UI = cast<Instruction>(U);
4476       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4477           Visited.insert(UI).second)
4478         Worklist.push_back(UI);
4479     }
4480   }
4481 }
4482 
4483 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4484   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4485     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already hand-updated by the reduction and recurrence
      // code above; leave them alone.
4488       continue;
4489 
4490     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4491     // Non-instruction incoming values will have only one value.
4492 
4493     VPLane Lane = VPLane::getFirstLane();
4494     if (isa<Instruction>(IncomingValue) &&
4495         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4496                                            VF))
4497       Lane = VPLane::getLastLaneForVF(VF);
4498 
4499     // Can be a loop invariant incoming value or the last scalar value to be
4500     // extracted from the vectorized loop.
4501     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4502     Value *lastIncomingValue =
4503         OrigLoop->isLoopInvariant(IncomingValue)
4504             ? IncomingValue
4505             : State.get(State.Plan->getVPValue(IncomingValue),
4506                         VPIteration(UF - 1, Lane));
4507     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4508   }
4509 }
4510 
4511 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4512   // The basic block and loop containing the predicated instruction.
4513   auto *PredBB = PredInst->getParent();
4514   auto *VectorLoop = LI->getLoopFor(PredBB);
4515 
4516   // Initialize a worklist with the operands of the predicated instruction.
4517   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4518 
4519   // Holds instructions that we need to analyze again. An instruction may be
4520   // reanalyzed if we don't yet know if we can sink it or not.
4521   SmallVector<Instruction *, 8> InstsToReanalyze;
4522 
4523   // Returns true if a given use occurs in the predicated block. Phi nodes use
4524   // their operands in their corresponding predecessor blocks.
4525   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4526     auto *I = cast<Instruction>(U.getUser());
4527     BasicBlock *BB = I->getParent();
4528     if (auto *Phi = dyn_cast<PHINode>(I))
4529       BB = Phi->getIncomingBlock(
4530           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4531     return BB == PredBB;
4532   };
4533 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends once a full
  // pass over the worklist sinks no instructions.
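  // E.g. a GEP feeding only a predicated store is moved into the store's
  // block; its own operands are then enqueued and may become sinkable on a
  // later pass.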
4538   bool Changed;
4539   do {
4540     // Add the instructions that need to be reanalyzed to the worklist, and
4541     // reset the changed indicator.
4542     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4543     InstsToReanalyze.clear();
4544     Changed = false;
4545 
4546     while (!Worklist.empty()) {
4547       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4548 
4549       // We can't sink an instruction if it is a phi node, is already in the
4550       // predicated block, is not in the loop, or may have side effects.
4551       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4552           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4553         continue;
4554 
4555       // It's legal to sink the instruction if all its uses occur in the
4556       // predicated block. Otherwise, there's nothing to do yet, and we may
4557       // need to reanalyze the instruction.
4558       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4559         InstsToReanalyze.push_back(I);
4560         continue;
4561       }
4562 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4565       I->moveBefore(&*PredBB->getFirstInsertionPt());
4566       Worklist.insert(I->op_begin(), I->op_end());
4567 
4568       // The sinking may have enabled other instructions to be sunk, so we will
4569       // need to iterate.
4570       Changed = true;
4571     }
4572   } while (Changed);
4573 }
4574 
4575 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4576   for (PHINode *OrigPhi : OrigPHIsToFix) {
4577     VPWidenPHIRecipe *VPPhi =
4578         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4579     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4580     // Make sure the builder has a valid insert point.
4581     Builder.SetInsertPoint(NewPhi);
4582     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4583       VPValue *Inc = VPPhi->getIncomingValue(i);
4584       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4585       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4586     }
4587   }
4588 }
4589 
4590 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4591                                    VPUser &Operands, unsigned UF,
4592                                    ElementCount VF, bool IsPtrLoopInvariant,
4593                                    SmallBitVector &IsIndexLoopInvariant,
4594                                    VPTransformState &State) {
4595   // Construct a vector GEP by widening the operands of the scalar GEP as
4596   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4597   // results in a vector of pointers when at least one operand of the GEP
4598   // is vector-typed. Thus, to keep the representation compact, we only use
4599   // vector-typed operands for loop-varying values.
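  //
  // For example, a scalar GEP with a loop-invariant base and a widened
  // induction index
  //   %gep = getelementptr inbounds float, float* %base, i64 %iv
  // becomes, roughly, for VF = 4
  //   %gep = getelementptr inbounds float, float* %base, <4 x i64> %iv.vec
  // i.e. a single GEP producing a <4 x float*> vector of pointers.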
4600 
4601   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4602     // If we are vectorizing, but the GEP has only loop-invariant operands,
4603     // the GEP we build (by only using vector-typed operands for
4604     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4605     // produce a vector of pointers, we need to either arbitrarily pick an
4606     // operand to broadcast, or broadcast a clone of the original GEP.
4607     // Here, we broadcast a clone of the original.
4608     //
4609     // TODO: If at some point we decide to scalarize instructions having
4610     //       loop-invariant operands, this special case will no longer be
4611     //       required. We would add the scalarization decision to
4612     //       collectLoopScalars() and teach getVectorValue() to broadcast
4613     //       the lane-zero scalar value.
4614     auto *Clone = Builder.Insert(GEP->clone());
4615     for (unsigned Part = 0; Part < UF; ++Part) {
4616       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4617       State.set(VPDef, EntryPart, Part);
4618       addMetadata(EntryPart, GEP);
4619     }
4620   } else {
4621     // If the GEP has at least one loop-varying operand, we are sure to
4622     // produce a vector of pointers. But if we are only unrolling, we want
4623     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4624     // produce with the code below will be scalar (if VF == 1) or vector
4625     // (otherwise). Note that for the unroll-only case, we still maintain
4626     // values in the vector mapping with initVector, as we do for other
4627     // instructions.
4628     for (unsigned Part = 0; Part < UF; ++Part) {
4629       // The pointer operand of the new GEP. If it's loop-invariant, we
4630       // won't broadcast it.
4631       auto *Ptr = IsPtrLoopInvariant
4632                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4633                       : State.get(Operands.getOperand(0), Part);
4634 
4635       // Collect all the indices for the new GEP. If any index is
4636       // loop-invariant, we won't broadcast it.
4637       SmallVector<Value *, 4> Indices;
4638       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4639         VPValue *Operand = Operands.getOperand(I);
4640         if (IsIndexLoopInvariant[I - 1])
4641           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4642         else
4643           Indices.push_back(State.get(Operand, Part));
4644       }
4645 
4646       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4647       // but it should be a vector, otherwise.
4648       auto *NewGEP =
4649           GEP->isInBounds()
4650               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4651                                           Indices)
4652               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4653       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4654              "NewGEP is not a pointer vector");
4655       State.set(VPDef, NewGEP, Part);
4656       addMetadata(NewGEP, GEP);
4657     }
4658   }
4659 }
4660 
4661 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4662                                               RecurrenceDescriptor *RdxDesc,
4663                                               VPValue *StartVPV, VPValue *Def,
4664                                               VPTransformState &State) {
4665   PHINode *P = cast<PHINode>(PN);
4666   if (EnableVPlanNativePath) {
4667     // Currently we enter here in the VPlan-native path for non-induction
4668     // PHIs where all control flow is uniform. We simply widen these PHIs.
4669     // Create a vector phi with no operands - the vector phi operands will be
4670     // set at the end of vector code generation.
4671     Type *VecTy = (State.VF.isScalar())
4672                       ? PN->getType()
4673                       : VectorType::get(PN->getType(), State.VF);
4674     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4675     State.set(Def, VecPhi, 0);
4676     OrigPHIsToFix.push_back(P);
4677 
4678     return;
4679   }
4680 
4681   assert(PN->getParent() == OrigLoop->getHeader() &&
4682          "Non-header phis should have been handled elsewhere");
4683 
4684   Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
4685   // In order to support recurrences we need to be able to vectorize Phi nodes.
4686   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4687   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4688   // this value when we vectorize all of the instructions that use the PHI.
4689   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4690     Value *Iden = nullptr;
4691     bool ScalarPHI =
4692         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4693     Type *VecTy =
4694         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4695 
4696     if (RdxDesc) {
4697       assert(Legal->isReductionVariable(P) && StartV &&
4698              "RdxDesc should only be set for reduction variables; in that case "
4699              "a StartV is also required");
4700       RecurKind RK = RdxDesc->getRecurrenceKind();
4701       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4703         if (ScalarPHI) {
4704           Iden = StartV;
4705         } else {
4706           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4707           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4708           StartV = Iden =
4709               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4710         }
4711       } else {
4712         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4713             RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
4714         Iden = IdenC;
4715 
4716         if (!ScalarPHI) {
4717           Iden = ConstantVector::getSplat(State.VF, IdenC);
4718           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4719           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4720           Constant *Zero = Builder.getInt32(0);
4721           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4722         }
4723       }
4724     }
4725 
4726     for (unsigned Part = 0; Part < State.UF; ++Part) {
4727       // This is phase one of vectorizing PHIs.
4728       Value *EntryPart = PHINode::Create(
4729           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4730       State.set(Def, EntryPart, Part);
4731       if (StartV) {
4732         // Make sure to add the reduction start value only to the
4733         // first unroll part.
4734         Value *StartVal = (Part == 0) ? StartV : Iden;
4735         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4736       }
4737     }
4738     return;
4739   }
4740 
4741   assert(!Legal->isReductionVariable(P) &&
4742          "reductions should be handled above");
4743 
4744   setDebugLocFromInst(Builder, P);
4745 
4746   // This PHINode must be an induction variable.
4747   // Make sure that we know about it.
4748   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4749 
4750   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4751   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4752 
4753   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4754   // which can be found from the original scalar operations.
4755   switch (II.getKind()) {
4756   case InductionDescriptor::IK_NoInduction:
4757     llvm_unreachable("Unknown induction");
4758   case InductionDescriptor::IK_IntInduction:
4759   case InductionDescriptor::IK_FpInduction:
4760     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4761   case InductionDescriptor::IK_PtrInduction: {
4762     // Handle the pointer induction variable case.
4763     assert(P->getType()->isPointerTy() && "Unexpected type.");
4764     assert(!VF.isScalable() && "Currently unsupported for scalable vectors");
4765 
4766     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4767       // This is the normalized GEP that starts counting at zero.
4768       Value *PtrInd =
4769           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4770       // Determine the number of scalars we need to generate for each unroll
4771       // iteration. If the instruction is uniform, we only need to generate the
4772       // first lane. Otherwise, we generate all VF values.
4773       unsigned Lanes = Cost->isUniformAfterVectorization(P, State.VF)
4774                            ? 1
4775                            : State.VF.getKnownMinValue();
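      // For example, with UF = 2 and VF = 4 (and a non-uniform use), eight
      // scalar "next.gep" values are emitted, one per (part, lane) pair,
      // using indices 0..7 relative to the normalized induction PtrInd.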
4776       for (unsigned Part = 0; Part < UF; ++Part) {
4777         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4778           Constant *Idx = ConstantInt::get(
4779               PtrInd->getType(), Lane + Part * State.VF.getKnownMinValue());
4780           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4781           Value *SclrGep =
4782               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4783           SclrGep->setName("next.gep");
4784           State.set(Def, SclrGep, VPIteration(Part, Lane));
4785         }
4786       }
4787       return;
4788     }
4789     assert(isa<SCEVConstant>(II.getStep()) &&
4790            "Induction step not a SCEV constant!");
4791     Type *PhiType = II.getStep()->getType();
4792 
4793     // Build a pointer phi
4794     Value *ScalarStartValue = II.getStartValue();
4795     Type *ScStValueType = ScalarStartValue->getType();
4796     PHINode *NewPointerPhi =
4797         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4798     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4799 
    // The pointer induction is advanced in the loop latch by using a GEP.
4801     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4802     Instruction *InductionLoc = LoopLatch->getTerminator();
4803     const SCEV *ScalarStep = II.getStep();
4804     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4805     Value *ScalarStepValue =
4806         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4807     Value *InductionGEP = GetElementPtrInst::Create(
4808         ScStValueType->getPointerElementType(), NewPointerPhi,
4809         Builder.CreateMul(
4810             ScalarStepValue,
4811             ConstantInt::get(PhiType, State.VF.getKnownMinValue() * State.UF)),
4812         "ptr.ind", InductionLoc);
4813     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4814 
    // Create UF actual address GEPs that use the pointer phi as their base
    // and a vectorized version of the step value (<step*0, ..., step*N>) as
    // their offset.
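    // For example, with VF = 4 and UF = 2, part 0 uses offsets
    // <0,1,2,3> * Step and part 1 uses offsets <4,5,6,7> * Step relative to
    // the pointer phi.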
4818     for (unsigned Part = 0; Part < State.UF; ++Part) {
4819       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4820       Value *StartOffset =
4821           ConstantInt::get(VecPhiType, Part * State.VF.getKnownMinValue());
4822       // Create a vector of consecutive numbers from zero to VF.
4823       StartOffset =
4824           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4825 
4826       Value *GEP = Builder.CreateGEP(
4827           ScStValueType->getPointerElementType(), NewPointerPhi,
4828           Builder.CreateMul(StartOffset,
4829                             Builder.CreateVectorSplat(
4830                                 State.VF.getKnownMinValue(), ScalarStepValue),
4831                             "vector.gep"));
4832       State.set(Def, GEP, Part);
4833     }
4834   }
4835   }
4836 }
4837 
4838 /// A helper function for checking whether an integer division-related
4839 /// instruction may divide by zero (in which case it must be predicated if
4840 /// executed conditionally in the scalar code).
4841 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4843 /// converted into multiplication, so we will still end up scalarizing
4844 /// the division, but can do so w/o predication.
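/// For example, 'udiv i32 %x, %y' and 'urem i32 %x, 0' may divide by zero,
/// whereas 'sdiv i32 %x, 4' cannot and needs no predication.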
4845 static bool mayDivideByZero(Instruction &I) {
4846   assert((I.getOpcode() == Instruction::UDiv ||
4847           I.getOpcode() == Instruction::SDiv ||
4848           I.getOpcode() == Instruction::URem ||
4849           I.getOpcode() == Instruction::SRem) &&
4850          "Unexpected instruction");
4851   Value *Divisor = I.getOperand(1);
4852   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4853   return !CInt || CInt->isZero();
4854 }
4855 
4856 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4857                                            VPUser &User,
4858                                            VPTransformState &State) {
4859   switch (I.getOpcode()) {
4860   case Instruction::Call:
4861   case Instruction::Br:
4862   case Instruction::PHI:
4863   case Instruction::GetElementPtr:
4864   case Instruction::Select:
4865     llvm_unreachable("This instruction is handled by a different recipe.");
4866   case Instruction::UDiv:
4867   case Instruction::SDiv:
4868   case Instruction::SRem:
4869   case Instruction::URem:
4870   case Instruction::Add:
4871   case Instruction::FAdd:
4872   case Instruction::Sub:
4873   case Instruction::FSub:
4874   case Instruction::FNeg:
4875   case Instruction::Mul:
4876   case Instruction::FMul:
4877   case Instruction::FDiv:
4878   case Instruction::FRem:
4879   case Instruction::Shl:
4880   case Instruction::LShr:
4881   case Instruction::AShr:
4882   case Instruction::And:
4883   case Instruction::Or:
4884   case Instruction::Xor: {
4885     // Just widen unops and binops.
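    // For example, a scalar 'add i32 %a, %b' becomes one 'add <VF x i32>'
    // per unroll part, with operands taken from the vectorized values in
    // State.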
4886     setDebugLocFromInst(Builder, &I);
4887 
4888     for (unsigned Part = 0; Part < UF; ++Part) {
4889       SmallVector<Value *, 2> Ops;
4890       for (VPValue *VPOp : User.operands())
4891         Ops.push_back(State.get(VPOp, Part));
4892 
4893       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4894 
4895       if (auto *VecOp = dyn_cast<Instruction>(V))
4896         VecOp->copyIRFlags(&I);
4897 
4898       // Use this vector value for all users of the original instruction.
4899       State.set(Def, V, Part);
4900       addMetadata(V, &I);
4901     }
4902 
4903     break;
4904   }
4905   case Instruction::ICmp:
4906   case Instruction::FCmp: {
4907     // Widen compares. Generate vector compares.
4908     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4909     auto *Cmp = cast<CmpInst>(&I);
4910     setDebugLocFromInst(Builder, Cmp);
4911     for (unsigned Part = 0; Part < UF; ++Part) {
4912       Value *A = State.get(User.getOperand(0), Part);
4913       Value *B = State.get(User.getOperand(1), Part);
4914       Value *C = nullptr;
4915       if (FCmp) {
4916         // Propagate fast math flags.
4917         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4918         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4919         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4920       } else {
4921         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4922       }
4923       State.set(Def, C, Part);
4924       addMetadata(C, &I);
4925     }
4926 
4927     break;
4928   }
4929 
4930   case Instruction::ZExt:
4931   case Instruction::SExt:
4932   case Instruction::FPToUI:
4933   case Instruction::FPToSI:
4934   case Instruction::FPExt:
4935   case Instruction::PtrToInt:
4936   case Instruction::IntToPtr:
4937   case Instruction::SIToFP:
4938   case Instruction::UIToFP:
4939   case Instruction::Trunc:
4940   case Instruction::FPTrunc:
4941   case Instruction::BitCast: {
4942     auto *CI = cast<CastInst>(&I);
4943     setDebugLocFromInst(Builder, CI);
4944 
    // Vectorize casts.
4946     Type *DestTy =
4947         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4948 
4949     for (unsigned Part = 0; Part < UF; ++Part) {
4950       Value *A = State.get(User.getOperand(0), Part);
4951       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4952       State.set(Def, Cast, Part);
4953       addMetadata(Cast, &I);
4954     }
4955     break;
4956   }
4957   default:
4958     // This instruction is not vectorized by simple widening.
4959     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4960     llvm_unreachable("Unhandled instruction!");
4961   } // end of switch.
4962 }
4963 
4964 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4965                                                VPUser &ArgOperands,
4966                                                VPTransformState &State) {
4967   assert(!isa<DbgInfoIntrinsic>(I) &&
4968          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4969   setDebugLocFromInst(Builder, &I);
4970 
4971   Module *M = I.getParent()->getParent()->getParent();
4972   auto *CI = cast<CallInst>(&I);
4973 
4974   SmallVector<Type *, 4> Tys;
4975   for (Value *ArgOperand : CI->arg_operands())
4976     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4977 
4978   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4979 
  // This flag shows whether we use an intrinsic or a plain call for the
  // vectorized version of the instruction, i.e. whether it is beneficial to
  // perform an intrinsic call compared to a library call.
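  // For example, a call with a matching vector intrinsic may be cheaper as
  // that intrinsic, while other calls may instead be mapped to a vector
  // library routine via the VFDatabase lookup below.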
4983   bool NeedToScalarize = false;
4984   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4985   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4986   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4987   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4988          "Instruction should be scalarized elsewhere.");
4989   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4990          "Either the intrinsic cost or vector call cost must be valid");
4991 
4992   for (unsigned Part = 0; Part < UF; ++Part) {
4993     SmallVector<Value *, 4> Args;
4994     for (auto &I : enumerate(ArgOperands.operands())) {
4995       // Some intrinsics have a scalar argument - don't replace it with a
4996       // vector.
4997       Value *Arg;
4998       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4999         Arg = State.get(I.value(), Part);
5000       else
5001         Arg = State.get(I.value(), VPIteration(0, 0));
5002       Args.push_back(Arg);
5003     }
5004 
5005     Function *VectorF;
5006     if (UseVectorIntrinsic) {
5007       // Use vector version of the intrinsic.
5008       Type *TysForDecl[] = {CI->getType()};
5009       if (VF.isVector())
5010         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5011       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5012       assert(VectorF && "Can't retrieve vector intrinsic.");
5013     } else {
5014       // Use vector version of the function call.
5015       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5016 #ifndef NDEBUG
5017       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5018              "Can't create vector function.");
5019 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
5031   }
5032 }
5033 
5034 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5035                                                  VPUser &Operands,
5036                                                  bool InvariantCond,
5037                                                  VPTransformState &State) {
5038   setDebugLocFromInst(Builder, &I);
5039 
  // The condition can be loop invariant but still defined inside the
5041   // loop. This means that we can't just use the original 'cond' value.
5042   // We have to take the 'vectorized' value and pick the first lane.
5043   // Instcombine will make this a no-op.
5044   auto *InvarCond = InvariantCond
5045                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5046                         : nullptr;
5047 
5048   for (unsigned Part = 0; Part < UF; ++Part) {
5049     Value *Cond =
5050         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5051     Value *Op0 = State.get(Operands.getOperand(1), Part);
5052     Value *Op1 = State.get(Operands.getOperand(2), Part);
5053     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5054     State.set(VPDef, Sel, Part);
5055     addMetadata(Sel, &I);
5056   }
5057 }
5058 
5059 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5060   // We should not collect Scalars more than once per VF. Right now, this
5061   // function is called from collectUniformsAndScalars(), which already does
5062   // this check. Collecting Scalars for VF=1 does not make any sense.
5063   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5064          "This function should not be visited twice for the same VF");
5065 
5066   SmallSetVector<Instruction *, 8> Worklist;
5067 
5068   // These sets are used to seed the analysis with pointers used by memory
5069   // accesses that will remain scalar.
5070   SmallSetVector<Instruction *, 8> ScalarPtrs;
5071   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5072   auto *Latch = TheLoop->getLoopLatch();
5073 
5074   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5075   // The pointer operands of loads and stores will be scalar as long as the
5076   // memory access is not a gather or scatter operation. The value operand of a
5077   // store will remain scalar if the store is scalarized.
5078   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5079     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5080     assert(WideningDecision != CM_Unknown &&
5081            "Widening decision should be ready at this moment");
5082     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5083       if (Ptr == Store->getValueOperand())
5084         return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
5087     return WideningDecision != CM_GatherScatter;
5088   };
5089 
5090   // A helper that returns true if the given value is a bitcast or
5091   // getelementptr instruction contained in the loop.
5092   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5093     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5094             isa<GetElementPtrInst>(V)) &&
5095            !TheLoop->isLoopInvariant(V);
5096   };
5097 
5098   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5099     if (!isa<PHINode>(Ptr) ||
5100         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5101       return false;
5102     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5103     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5104       return false;
5105     return isScalarUse(MemAccess, Ptr);
5106   };
5107 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is actually the pointer induction of a loop, it is inserted into
  // Worklist. If the use will be a scalar use, and the pointer is only used
  // by memory accesses, we place the pointer in ScalarPtrs. Otherwise, the
  // pointer is placed in PossibleNonScalarPtrs.
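  // For example, a getelementptr used only as the address of a load that
  // will be widened (rather than turned into a gather) is a candidate for
  // ScalarPtrs.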
5113   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5114     if (isScalarPtrInduction(MemAccess, Ptr)) {
5115       Worklist.insert(cast<Instruction>(Ptr));
5116       Instruction *Update = cast<Instruction>(
5117           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5118       Worklist.insert(Update);
5119       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5120                         << "\n");
5121       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5122                         << "\n");
5123       return;
5124     }
5125     // We only care about bitcast and getelementptr instructions contained in
5126     // the loop.
5127     if (!isLoopVaryingBitCastOrGEP(Ptr))
5128       return;
5129 
5130     // If the pointer has already been identified as scalar (e.g., if it was
5131     // also identified as uniform), there's nothing to do.
5132     auto *I = cast<Instruction>(Ptr);
5133     if (Worklist.count(I))
5134       return;
5135 
5136     // If the use of the pointer will be a scalar use, and all users of the
5137     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5138     // place the pointer in PossibleNonScalarPtrs.
5139     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5140           return isa<LoadInst>(U) || isa<StoreInst>(U);
5141         }))
5142       ScalarPtrs.insert(I);
5143     else
5144       PossibleNonScalarPtrs.insert(I);
5145   };
5146 
  // We seed the scalars analysis with two classes of instructions: (1)
5148   // instructions marked uniform-after-vectorization and (2) bitcast,
5149   // getelementptr and (pointer) phi instructions used by memory accesses
5150   // requiring a scalar use.
5151   //
5152   // (1) Add to the worklist all instructions that have been identified as
5153   // uniform-after-vectorization.
5154   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5155 
5156   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5157   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5159   // scatter operation. The value operand of a store will remain scalar if the
5160   // store is scalarized.
5161   for (auto *BB : TheLoop->blocks())
5162     for (auto &I : *BB) {
5163       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5164         evaluatePtrUse(Load, Load->getPointerOperand());
5165       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5166         evaluatePtrUse(Store, Store->getPointerOperand());
5167         evaluatePtrUse(Store, Store->getValueOperand());
5168       }
5169     }
5170   for (auto *I : ScalarPtrs)
5171     if (!PossibleNonScalarPtrs.count(I)) {
5172       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5173       Worklist.insert(I);
5174     }
5175 
5176   // Insert the forced scalars.
5177   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5178   // induction variable when the PHI user is scalarized.
5179   auto ForcedScalar = ForcedScalars.find(VF);
5180   if (ForcedScalar != ForcedScalars.end())
5181     for (auto *I : ForcedScalar->second)
5182       Worklist.insert(I);
5183 
5184   // Expand the worklist by looking through any bitcasts and getelementptr
5185   // instructions we've already identified as scalar. This is similar to the
5186   // expansion step in collectLoopUniforms(); however, here we're only
5187   // expanding to include additional bitcasts and getelementptr instructions.
5188   unsigned Idx = 0;
5189   while (Idx != Worklist.size()) {
5190     Instruction *Dst = Worklist[Idx++];
5191     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5192       continue;
5193     auto *Src = cast<Instruction>(Dst->getOperand(0));
5194     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5195           auto *J = cast<Instruction>(U);
5196           return !TheLoop->contains(J) || Worklist.count(J) ||
5197                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5198                   isScalarUse(J, Src));
5199         })) {
5200       Worklist.insert(Src);
5201       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5202     }
5203   }
5204 
5205   // An induction variable will remain scalar if all users of the induction
5206   // variable and induction variable update remain scalar.
5207   for (auto &Induction : Legal->getInductionVars()) {
5208     auto *Ind = Induction.first;
5209     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5210 
5211     // If tail-folding is applied, the primary induction variable will be used
5212     // to feed a vector compare.
5213     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5214       continue;
5215 
5216     // Determine if all users of the induction variable are scalar after
5217     // vectorization.
5218     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5219       auto *I = cast<Instruction>(U);
5220       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5221     });
5222     if (!ScalarInd)
5223       continue;
5224 
5225     // Determine if all users of the induction variable update instruction are
5226     // scalar after vectorization.
5227     auto ScalarIndUpdate =
5228         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5229           auto *I = cast<Instruction>(U);
5230           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5231         });
5232     if (!ScalarIndUpdate)
5233       continue;
5234 
5235     // The induction variable and its update instruction will remain scalar.
5236     Worklist.insert(Ind);
5237     Worklist.insert(IndUpdate);
5238     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5239     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5240                       << "\n");
5241   }
5242 
5243   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5244 }
5245 
5246 bool LoopVectorizationCostModel::isScalarWithPredication(
5247     Instruction *I, ElementCount VF) const {
5248   if (!blockNeedsPredication(I->getParent()))
5249     return false;
5250   switch(I->getOpcode()) {
5251   default:
5252     break;
5253   case Instruction::Load:
5254   case Instruction::Store: {
5255     if (!Legal->isMaskRequired(I))
5256       return false;
5257     auto *Ptr = getLoadStorePointerOperand(I);
5258     auto *Ty = getMemInstValueType(I);
5259     // We have already decided how to vectorize this instruction, get that
5260     // result.
5261     if (VF.isVector()) {
5262       InstWidening WideningDecision = getWideningDecision(I, VF);
5263       assert(WideningDecision != CM_Unknown &&
5264              "Widening decision should be ready at this moment");
5265       return WideningDecision == CM_Scalarize;
5266     }
5267     const Align Alignment = getLoadStoreAlignment(I);
5268     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5269                                 isLegalMaskedGather(Ty, Alignment))
5270                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5271                                 isLegalMaskedScatter(Ty, Alignment));
5272   }
5273   case Instruction::UDiv:
5274   case Instruction::SDiv:
5275   case Instruction::SRem:
5276   case Instruction::URem:
5277     return mayDivideByZero(*I);
5278   }
5279   return false;
5280 }
5281 
5282 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5283     Instruction *I, ElementCount VF) {
5284   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5285   assert(getWideningDecision(I, VF) == CM_Unknown &&
5286          "Decision should not be set yet.");
5287   auto *Group = getInterleavedAccessGroup(I);
5288   assert(Group && "Must have a group.");
5289 
  // If the instruction's allocated size doesn't equal its type size, it
5291   // requires padding and will be scalarized.
5292   auto &DL = I->getModule()->getDataLayout();
5293   auto *ScalarTy = getMemInstValueType(I);
5294   if (hasIrregularType(ScalarTy, DL))
5295     return false;
5296 
5297   // Check if masking is required.
5298   // A Group may need masking for one of two reasons: it resides in a block that
5299   // needs predication, or it was decided to use masking to deal with gaps.
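  // For example, an interleave group with gaps normally relies on a scalar
  // epilogue to avoid accessing memory past the last complete group; if no
  // scalar epilogue is allowed, the group must be masked instead.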
5300   bool PredicatedAccessRequiresMasking =
5301       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5302   bool AccessWithGapsRequiresMasking =
5303       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5304   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5305     return true;
5306 
5307   // If masked interleaving is required, we expect that the user/target had
5308   // enabled it, because otherwise it either wouldn't have been created or
5309   // it should have been invalidated by the CostModel.
5310   assert(useMaskedInterleavedAccesses(TTI) &&
5311          "Masked interleave-groups for predicated accesses are not enabled.");
5312 
5313   auto *Ty = getMemInstValueType(I);
5314   const Align Alignment = getLoadStoreAlignment(I);
5315   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5316                           : TTI.isLegalMaskedStore(Ty, Alignment);
5317 }
5318 
5319 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5320     Instruction *I, ElementCount VF) {
5321   // Get and ensure we have a valid memory instruction.
5322   LoadInst *LI = dyn_cast<LoadInst>(I);
5323   StoreInst *SI = dyn_cast<StoreInst>(I);
5324   assert((LI || SI) && "Invalid memory instruction");
5325 
5326   auto *Ptr = getLoadStorePointerOperand(I);
5327 
  // First of all, in order to be widened the pointer must be consecutive.
5329   if (!Legal->isConsecutivePtr(Ptr))
5330     return false;
5331 
5332   // If the instruction is a store located in a predicated block, it will be
5333   // scalarized.
5334   if (isScalarWithPredication(I))
5335     return false;
5336 
  // If the instruction's allocated size doesn't equal its type size, it
5338   // requires padding and will be scalarized.
5339   auto &DL = I->getModule()->getDataLayout();
5340   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5341   if (hasIrregularType(ScalarTy, DL))
5342     return false;
5343 
5344   return true;
5345 }
5346 
5347 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5348   // We should not collect Uniforms more than once per VF. Right now,
5349   // this function is called from collectUniformsAndScalars(), which
5350   // already does this check. Collecting Uniforms for VF=1 does not make any
5351   // sense.
5352 
5353   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5354          "This function should not be visited twice for the same VF");
5355 
  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again. Uniforms.count(VF) will return 1 either way.
5358   Uniforms[VF].clear();
5359 
5360   // We now know that the loop is vectorizable!
5361   // Collect instructions inside the loop that will remain uniform after
5362   // vectorization.
5363 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5366   auto isOutOfScope = [&](Value *V) -> bool {
5367     Instruction *I = dyn_cast<Instruction>(V);
5368     return (!I || !TheLoop->contains(I));
5369   };
5370 
5371   SetVector<Instruction *> Worklist;
5372   BasicBlock *Latch = TheLoop->getLoopLatch();
5373 
5374   // Instructions that are scalar with predication must not be considered
5375   // uniform after vectorization, because that would create an erroneous
5376   // replicating region where only a single instance out of VF should be formed.
5377   // TODO: optimize such seldom cases if found important, see PR40816.
5378   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5379     if (isOutOfScope(I)) {
5380       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5381                         << *I << "\n");
5382       return;
5383     }
5384     if (isScalarWithPredication(I, VF)) {
5385       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5386                         << *I << "\n");
5387       return;
5388     }
5389     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5390     Worklist.insert(I);
5391   };
5392 
5393   // Start with the conditional branch. If the branch condition is an
5394   // instruction contained in the loop that is only used by the branch, it is
5395   // uniform.
5396   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5397   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5398     addToWorklistIfAllowed(Cmp);
5399 
5400   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5401     InstWidening WideningDecision = getWideningDecision(I, VF);
5402     assert(WideningDecision != CM_Unknown &&
5403            "Widening decision should be ready at this moment");
5404 
5405     // A uniform memory op is itself uniform.  We exclude uniform stores
5406     // here as they demand the last lane, not the first one.
5407     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5408       assert(WideningDecision == CM_Scalarize);
5409       return true;
5410     }
5411 
5412     return (WideningDecision == CM_Widen ||
5413             WideningDecision == CM_Widen_Reverse ||
5414             WideningDecision == CM_Interleave);
5415   };
5416 
5417 
5418   // Returns true if Ptr is the pointer operand of a memory access instruction
5419   // I, and I is known to not require scalarization.
5420   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5421     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5422   };
5423 
5424   // Holds a list of values which are known to have at least one uniform use.
5425   // Note that there may be other uses which aren't uniform.  A "uniform use"
5426   // here is something which only demands lane 0 of the unrolled iterations;
5427   // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
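  // For example, the pointer operand of a consecutive (widened) load is such
  // a use: only its lane-0 value is needed to form the vector load, even
  // though the loaded values themselves differ per lane.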
5429   SetVector<Value *> HasUniformUse;
5430 
5431   // Scan the loop for instructions which are either a) known to have only
5432   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5433   for (auto *BB : TheLoop->blocks())
5434     for (auto &I : *BB) {
5435       // If there's no pointer operand, there's nothing to do.
5436       auto *Ptr = getLoadStorePointerOperand(&I);
5437       if (!Ptr)
5438         continue;
5439 
5440       // A uniform memory op is itself uniform.  We exclude uniform stores
5441       // here as they demand the last lane, not the first one.
5442       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5443         addToWorklistIfAllowed(&I);
5444 
5445       if (isUniformDecision(&I, VF)) {
5446         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5447         HasUniformUse.insert(Ptr);
5448       }
5449     }
5450 
5451   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5452   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5453   // disallows uses outside the loop as well.
5454   for (auto *V : HasUniformUse) {
5455     if (isOutOfScope(V))
5456       continue;
5457     auto *I = cast<Instruction>(V);
5458     auto UsersAreMemAccesses =
5459       llvm::all_of(I->users(), [&](User *U) -> bool {
5460         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5461       });
5462     if (UsersAreMemAccesses)
5463       addToWorklistIfAllowed(I);
5464   }
5465 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
5469   unsigned idx = 0;
5470   while (idx != Worklist.size()) {
5471     Instruction *I = Worklist[idx++];
5472 
5473     for (auto OV : I->operand_values()) {
5474       // isOutOfScope operands cannot be uniform instructions.
5475       if (isOutOfScope(OV))
5476         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5479       auto *OP = dyn_cast<PHINode>(OV);
5480       if (OP && Legal->isFirstOrderRecurrence(OP))
5481         continue;
5482       // If all the users of the operand are uniform, then add the
5483       // operand into the uniform worklist.
5484       auto *OI = cast<Instruction>(OV);
5485       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5486             auto *J = cast<Instruction>(U);
5487             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5488           }))
5489         addToWorklistIfAllowed(OI);
5490     }
5491   }
5492 
5493   // For an instruction to be added into Worklist above, all its users inside
5494   // the loop should also be in Worklist. However, this condition cannot be
5495   // true for phi nodes that form a cyclic dependence. We must process phi
5496   // nodes separately. An induction variable will remain uniform if all users
5497   // of the induction variable and induction variable update remain uniform.
5498   // The code below handles both pointer and non-pointer induction variables.
5499   for (auto &Induction : Legal->getInductionVars()) {
5500     auto *Ind = Induction.first;
5501     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5502 
5503     // Determine if all users of the induction variable are uniform after
5504     // vectorization.
5505     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5506       auto *I = cast<Instruction>(U);
5507       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5508              isVectorizedMemAccessUse(I, Ind);
5509     });
5510     if (!UniformInd)
5511       continue;
5512 
5513     // Determine if all users of the induction variable update instruction are
5514     // uniform after vectorization.
5515     auto UniformIndUpdate =
5516         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5517           auto *I = cast<Instruction>(U);
5518           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5519                  isVectorizedMemAccessUse(I, IndUpdate);
5520         });
5521     if (!UniformIndUpdate)
5522       continue;
5523 
5524     // The induction variable and its update instruction will remain uniform.
5525     addToWorklistIfAllowed(Ind);
5526     addToWorklistIfAllowed(IndUpdate);
5527   }
5528 
5529   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5530 }
5531 
5532 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5533   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5534 
5535   if (Legal->getRuntimePointerChecking()->Need) {
5536     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5537         "runtime pointer checks needed. Enable vectorization of this "
5538         "loop with '#pragma clang loop vectorize(enable)' when "
5539         "compiling with -Os/-Oz",
5540         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5541     return true;
5542   }
5543 
5544   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5545     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5546         "runtime SCEV checks needed. Enable vectorization of this "
5547         "loop with '#pragma clang loop vectorize(enable)' when "
5548         "compiling with -Os/-Oz",
5549         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5550     return true;
5551   }
5552 
5553   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5554   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
5556         "runtime stride == 1 checks needed. Enable vectorization of "
5557         "this loop without such check by compiling with -Os/-Oz",
5558         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5559     return true;
5560   }
5561 
5562   return false;
5563 }
5564 
5565 Optional<ElementCount>
5566 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5567   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
5570     reportVectorizationFailure(
5571         "Not inserting runtime ptr check for divergent target",
5572         "runtime pointer checks needed. Not enabled for divergent target",
5573         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5574     return None;
5575   }
5576 
5577   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5578   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5579   if (TC == 1) {
5580     reportVectorizationFailure("Single iteration (non) loop",
5581         "loop trip count is one, irrelevant for vectorization",
5582         "SingleIterationLoop", ORE, TheLoop);
5583     return None;
5584   }
5585 
5586   switch (ScalarEpilogueStatus) {
5587   case CM_ScalarEpilogueAllowed:
5588     return computeFeasibleMaxVF(TC, UserVF);
5589   case CM_ScalarEpilogueNotAllowedUsePredicate:
5590     LLVM_FALLTHROUGH;
5591   case CM_ScalarEpilogueNotNeededUsePredicate:
5592     LLVM_DEBUG(
5593         dbgs() << "LV: vector predicate hint/switch found.\n"
5594                << "LV: Not allowing scalar epilogue, creating predicated "
5595                << "vector loop.\n");
5596     break;
5597   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5598     // fallthrough as a special case of OptForSize
5599   case CM_ScalarEpilogueNotAllowedOptSize:
5600     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5601       LLVM_DEBUG(
5602           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5603     else
5604       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5605                         << "count.\n");
5606 
5607     // Bail if runtime checks are required, which are not good when optimising
5608     // for size.
5609     if (runtimeChecksRequired())
5610       return None;
5611 
5612     break;
5613   }
5614 
  // The only loops we can vectorize without a scalar epilogue are loops with
5616   // a bottom-test and a single exiting block. We'd have to handle the fact
5617   // that not every instruction executes on the last iteration.  This will
5618   // require a lane mask which varies through the vector loop body.  (TODO)
5619   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5620     // If there was a tail-folding hint/switch, but we can't fold the tail by
5621     // masking, fallback to a vectorization with a scalar epilogue.
5622     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5623       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5624                            "scalar epilogue instead.\n");
5625       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5626       return computeFeasibleMaxVF(TC, UserVF);
5627     }
5628     return None;
5629   }
5630 
5631   // Now try the tail folding
5632 
5633   // Invalidate interleave groups that require an epilogue if we can't mask
5634   // the interleave-group.
5635   if (!useMaskedInterleavedAccesses(TTI)) {
5636     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5637            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5640     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5641   }
5642 
5643   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5644   assert(!MaxVF.isScalable() &&
5645          "Scalable vectors do not yet support tail folding");
5646   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5647          "MaxVF must be a power of 2");
5648   unsigned MaxVFtimesIC =
5649       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
5650   // Avoid tail folding if the trip count is known to be a multiple of any VF we
5651   // chose.
5652   ScalarEvolution *SE = PSE.getSE();
5653   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5654   const SCEV *ExitCount = SE->getAddExpr(
5655       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5656   const SCEV *Rem = SE->getURemExpr(
5657       SE->applyLoopGuards(ExitCount, TheLoop),
5658       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
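  // For example, a known trip count of 128 with MaxVF = 8 and UserIC = 2
  // leaves no remainder, so no tail (and no tail folding) is needed.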
5659   if (Rem->isZero()) {
5660     // Accept MaxVF if we do not have a tail.
5661     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5662     return MaxVF;
5663   }
5664 
5665   // If we don't know the precise trip count, or if the trip count that we
5666   // found modulo the vectorization factor is not zero, try to fold the tail
5667   // by masking.
5668   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5669   if (Legal->prepareToFoldTailByMasking()) {
5670     FoldTailByMasking = true;
5671     return MaxVF;
5672   }
5673 
5674   // If there was a tail-folding hint/switch, but we can't fold the tail by
5675   // masking, fallback to a vectorization with a scalar epilogue.
5676   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5677     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5678                          "scalar epilogue instead.\n");
5679     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5680     return MaxVF;
5681   }
5682 
5683   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5684     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5685     return None;
5686   }
5687 
5688   if (TC == 0) {
5689     reportVectorizationFailure(
5690         "Unable to calculate the loop count due to complex control flow",
5691         "unable to calculate the loop count due to complex control flow",
5692         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5693     return None;
5694   }
5695 
5696   reportVectorizationFailure(
5697       "Cannot optimize for size and vectorize at the same time.",
5698       "cannot optimize for size and vectorize at the same time. "
5699       "Enable vectorization of this loop with '#pragma clang loop "
5700       "vectorize(enable)' when compiling with -Os/-Oz",
5701       "NoTailLoopWithOptForSize", ORE, TheLoop);
5702   return None;
5703 }
5704 
5705 ElementCount
5706 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5707                                                  ElementCount UserVF) {
5708   bool IgnoreScalableUserVF = UserVF.isScalable() &&
5709                               !TTI.supportsScalableVectors() &&
5710                               !ForceTargetSupportsScalableVectors;
5711   if (IgnoreScalableUserVF) {
5712     LLVM_DEBUG(
5713         dbgs() << "LV: Ignoring VF=" << UserVF
5714                << " because target does not support scalable vectors.\n");
5715     ORE->emit([&]() {
5716       return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
5717                                         TheLoop->getStartLoc(),
5718                                         TheLoop->getHeader())
5719              << "Ignoring VF=" << ore::NV("UserVF", UserVF)
5720              << " because target does not support scalable vectors.";
5721     });
5722   }
5723 
5724   // Beyond this point two scenarios are handled. If UserVF isn't specified
5725   // then a suitable VF is chosen. If UserVF is specified and there are
5726   // dependencies, check if it's legal. However, if a UserVF is specified and
5727   // there are no dependencies, then there's nothing to do.
5728   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5729     if (!canVectorizeReductions(UserVF)) {
5730       reportVectorizationFailure(
5731           "LV: Scalable vectorization not supported for the reduction "
5732           "operations found in this loop. Using fixed-width "
5733           "vectorization instead.",
5734           "Scalable vectorization not supported for the reduction operations "
5735           "found in this loop. Using fixed-width vectorization instead.",
5736           "ScalableVFUnfeasible", ORE, TheLoop);
5737       return computeFeasibleMaxVF(
5738           ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5739     }
5740 
5741     if (Legal->isSafeForAnyVectorWidth())
5742       return UserVF;
5743   }
5744 
5745   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5746   unsigned SmallestType, WidestType;
5747   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5748   unsigned WidestRegister =
5749       TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
5750           .getFixedSize();
5751 
5752   // Get the maximum safe dependence distance in bits computed by LAA.
5753   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5755   // dependence distance).
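  // For example, a maximum safe width of 256 bits with a widest element type
  // of 32 bits allows at most 8 lanes to be vectorized safely.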
5756   unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();
5757 
5758   // If the user vectorization factor is legally unsafe, clamp it to a safe
5759   // value. Otherwise, return as is.
5760   if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
5761     unsigned MaxSafeElements =
5762         PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
5763     ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);
5764 
5765     if (UserVF.isScalable()) {
5766       Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5767 
5768       // Scale VF by vscale before checking if it's safe.
5769       MaxSafeVF = ElementCount::getScalable(
5770           MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5771 
5772       if (MaxSafeVF.isZero()) {
5773         // The dependence distance is too small to use scalable vectors,
5774         // fallback on fixed.
5775         LLVM_DEBUG(
5776             dbgs()
5777             << "LV: Max legal vector width too small, scalable vectorization "
5778                "unfeasible. Using fixed-width vectorization instead.\n");
5779         ORE->emit([&]() {
5780           return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
5781                                             TheLoop->getStartLoc(),
5782                                             TheLoop->getHeader())
5783                  << "Max legal vector width too small, scalable vectorization "
5784                  << "unfeasible. Using fixed-width vectorization instead.";
5785         });
5786         return computeFeasibleMaxVF(
5787             ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
5788       }
5789     }
5790 
5791     LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");
5792 
5793     if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
5794       return UserVF;
5795 
5796     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5797                       << " is unsafe, clamping to max safe VF=" << MaxSafeVF
5798                       << ".\n");
5799     ORE->emit([&]() {
5800       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5801                                         TheLoop->getStartLoc(),
5802                                         TheLoop->getHeader())
5803              << "User-specified vectorization factor "
5804              << ore::NV("UserVectorizationFactor", UserVF)
5805              << " is unsafe, clamping to maximum safe vectorization factor "
5806              << ore::NV("VectorizationFactor", MaxSafeVF);
5807     });
5808     return MaxSafeVF;
5809   }
5810 
5811   WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);
5812 
5813   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5815   auto MaxVectorSize =
5816       ElementCount::getFixed(PowerOf2Floor(WidestRegister / WidestType));
5817 
5818   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5819                     << " / " << WidestType << " bits.\n");
5820   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5821                     << WidestRegister << " bits.\n");
5822 
5823   assert(MaxVectorSize.getFixedValue() <= WidestRegister &&
5824          "Did not expect to pack so many elements"
5825          " into one vector!");
5826   if (MaxVectorSize.getFixedValue() == 0) {
5827     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5828     return ElementCount::getFixed(1);
5829   } else if (ConstTripCount && ConstTripCount < MaxVectorSize.getFixedValue() &&
5830              isPowerOf2_32(ConstTripCount)) {
5831     // We need to clamp the VF to be the ConstTripCount. There is no point in
5832     // choosing a higher viable VF as done in the loop below.
5833     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5834                       << ConstTripCount << "\n");
5835     return ElementCount::getFixed(ConstTripCount);
5836   }
5837 
5838   ElementCount MaxVF = MaxVectorSize;
5839   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5840       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5841     // Collect all viable vectorization factors larger than the default MaxVF
5842     // (i.e. MaxVectorSize).
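    // For example, if MaxVectorSize is 4 but the smallest element type fits
    // 16 lanes into the widest register, VFs of 8 and 16 are also evaluated.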
5843     SmallVector<ElementCount, 8> VFs;
5844     auto MaxVectorSizeMaxBW =
5845         ElementCount::getFixed(WidestRegister / SmallestType);
5846     for (ElementCount VS = MaxVectorSize * 2;
5847          ElementCount::isKnownLE(VS, MaxVectorSizeMaxBW); VS *= 2)
5848       VFs.push_back(VS);
5849 
5850     // For each VF calculate its register usage.
5851     auto RUs = calculateRegisterUsage(VFs);
5852 
5853     // Select the largest VF which doesn't require more registers than existing
5854     // ones.
5855     for (int i = RUs.size() - 1; i >= 0; --i) {
5856       bool Selected = true;
5857       for (auto &pair : RUs[i].MaxLocalUsers) {
5858         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5859         if (pair.second > TargetNumRegisters)
5860           Selected = false;
5861       }
5862       if (Selected) {
5863         MaxVF = VFs[i];
5864         break;
5865       }
5866     }
5867     if (ElementCount MinVF =
5868             TTI.getMinimumVF(SmallestType, /*IsScalable=*/false)) {
5869       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5870         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5871                           << ") with target's minimum: " << MinVF << '\n');
5872         MaxVF = MinVF;
5873       }
5874     }
5875   }
5876   return MaxVF;
5877 }
5878 
5879 VectorizationFactor
5880 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5881   // FIXME: This can be fixed for scalable vectors later, because at this stage
5882   // the LoopVectorizer will only consider vectorizing a loop with scalable
5883   // vectors when the loop has a hint to enable vectorization for a given VF.
5884   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
5885 
5886   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5887   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5888   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5889 
5890   auto Width = ElementCount::getFixed(1);
5891   const float ScalarCost = *ExpectedCost.getValue();
5892   float Cost = ScalarCost;
5893 
5894   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5895   if (ForceVectorization && MaxVF.isVector()) {
5896     // Ignore scalar width, because the user explicitly wants vectorization.
5897     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5898     // evaluation.
5899     Cost = std::numeric_limits<float>::max();
5900   }
5901 
5902   for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
5903        i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the number of
    // vector elements (the VF).
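    // For example, a scalar loop cost of 8 against a VF = 4 loop cost of 20
    // gives a per-lane vector cost of 5, which beats the scalar cost.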
5907     VectorizationCostTy C = expectedCost(i);
5908     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
5909     float VectorCost = *C.first.getValue() / (float)i.getFixedValue();
5910     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5911                       << " costs: " << (int)VectorCost << ".\n");
5912     if (!C.second && !ForceVectorization) {
5913       LLVM_DEBUG(
5914           dbgs() << "LV: Not considering vector loop of width " << i
5915                  << " because it will not generate any vector instructions.\n");
5916       continue;
5917     }
5918 
5919     // If profitable add it to ProfitableVF list.
5920     if (VectorCost < ScalarCost) {
5921       ProfitableVFs.push_back(VectorizationFactor(
5922           {i, (unsigned)VectorCost}));
5923     }
5924 
5925     if (VectorCost < Cost) {
5926       Cost = VectorCost;
5927       Width = i;
5928     }
5929   }
5930 
5931   if (!EnableCondStoresVectorization && NumPredStores) {
5932     reportVectorizationFailure("There are conditional stores.",
5933         "store that is conditionally executed prevents vectorization",
5934         "ConditionalStore", ORE, TheLoop);
5935     Width = ElementCount::getFixed(1);
5936     Cost = ScalarCost;
5937   }
5938 
5939   LLVM_DEBUG(if (ForceVectorization && !Width.isScalar() && Cost >= ScalarCost) dbgs()
5940              << "LV: Vectorization seems to be not beneficial, "
5941              << "but was forced by a user.\n");
5942   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5943   VectorizationFactor Factor = {Width,
5944                                 (unsigned)(Width.getKnownMinValue() * Cost)};
5945   return Factor;
5946 }
5947 
5948 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5949     const Loop &L, ElementCount VF) const {
5950   // Cross iteration phis such as reductions need special handling and are
5951   // currently unsupported.
5952   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5953         return Legal->isFirstOrderRecurrence(&Phi) ||
5954                Legal->isReductionVariable(&Phi);
5955       }))
5956     return false;
5957 
5958   // Phis with uses outside of the loop require special handling and are
5959   // currently unsupported.
5960   for (auto &Entry : Legal->getInductionVars()) {
5961     // Look for uses of the value of the induction at the last iteration.
5962     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5963     for (User *U : PostInc->users())
5964       if (!L.contains(cast<Instruction>(U)))
5965         return false;
    // Look for uses of the penultimate value of the induction.
5967     for (User *U : Entry.first->users())
5968       if (!L.contains(cast<Instruction>(U)))
5969         return false;
5970   }
5971 
5972   // Induction variables that are widened require special handling that is
5973   // currently not supported.
5974   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5975         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5976                  this->isProfitableToScalarize(Entry.first, VF));
5977       }))
5978     return false;
5979 
5980   return true;
5981 }
5982 
5983 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5984     const ElementCount VF) const {
5985   // FIXME: We need a much better cost-model to take different parameters such
5986   // as register pressure, code size increase and cost of extra branches into
5987   // account. For now we apply a very crude heuristic and only consider loops
5988   // with vectorization factors larger than a certain value.
5989   // We also consider epilogue vectorization unprofitable for targets that don't
5990   // consider interleaving beneficial (eg. MVE).
5991   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5992     return false;
5993   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5994     return true;
5995   return false;
5996 }
5997 
5998 VectorizationFactor
5999 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6000     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6001   VectorizationFactor Result = VectorizationFactor::Disabled();
6002   if (!EnableEpilogueVectorization) {
6003     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6004     return Result;
6005   }
6006 
6007   if (!isScalarEpilogueAllowed()) {
6008     LLVM_DEBUG(
6009         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6010                   "allowed.\n";);
6011     return Result;
6012   }
6013 
6014   // FIXME: This can be fixed for scalable vectors later, because at this stage
6015   // the LoopVectorizer will only consider vectorizing a loop with scalable
6016   // vectors when the loop has a hint to enable vectorization for a given VF.
6017   if (MainLoopVF.isScalable()) {
6018     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6019                          "yet supported.\n");
6020     return Result;
6021   }
6022 
6023   // Not really a cost consideration, but check for unsupported cases here to
6024   // simplify the logic.
6025   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6026     LLVM_DEBUG(
6027         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6028                   "not a supported candidate.\n";);
6029     return Result;
6030   }
6031 
6032   if (EpilogueVectorizationForceVF > 1) {
6033     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6034     if (LVP.hasPlanWithVFs(
6035             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6036       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
6037     else {
6038       LLVM_DEBUG(
6039           dbgs()
6040               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
6041       return Result;
6042     }
6043   }
6044 
6045   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6046       TheLoop->getHeader()->getParent()->hasMinSize()) {
6047     LLVM_DEBUG(
6048         dbgs()
6049             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6050     return Result;
6051   }
6052 
6053   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6054     return Result;
6055 
6056   for (auto &NextVF : ProfitableVFs)
6057     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6058         (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
6059         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6060       Result = NextVF;
6061 
6062   if (Result != VectorizationFactor::Disabled())
6063     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6064                       << Result.Width.getFixedValue() << "\n";);
6065   return Result;
6066 }
6067 
6068 std::pair<unsigned, unsigned>
6069 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6070   unsigned MinWidth = -1U;
6071   unsigned MaxWidth = 8;
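  // MinWidth starts at UINT_MAX and MaxWidth at 8 bits, so any examined type
  // narrows MinWidth, and the widest type we report is at least one byte.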
6072   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6073 
6074   // For each block.
6075   for (BasicBlock *BB : TheLoop->blocks()) {
6076     // For each instruction in the loop.
6077     for (Instruction &I : BB->instructionsWithoutDebug()) {
6078       Type *T = I.getType();
6079 
6080       // Skip ignored values.
6081       if (ValuesToIgnore.count(&I))
6082         continue;
6083 
6084       // Only examine Loads, Stores and PHINodes.
6085       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6086         continue;
6087 
6088       // Examine PHI nodes that are reduction variables. Update the type to
6089       // account for the recurrence type.
6090       if (auto *PN = dyn_cast<PHINode>(&I)) {
6091         if (!Legal->isReductionVariable(PN))
6092           continue;
6093         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6094         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6095             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6096                                       RdxDesc.getRecurrenceType(),
6097                                       TargetTransformInfo::ReductionFlags()))
6098           continue;
6099         T = RdxDesc.getRecurrenceType();
6100       }
6101 
6102       // Examine the stored values.
6103       if (auto *ST = dyn_cast<StoreInst>(&I))
6104         T = ST->getValueOperand()->getType();
6105 
6106       // Ignore loaded pointer types and stored pointer types that are not
6107       // vectorizable.
6108       //
6109       // FIXME: The check here attempts to predict whether a load or store will
6110       //        be vectorized. We only know this for certain after a VF has
6111       //        been selected. Here, we assume that if an access can be
6112       //        vectorized, it will be. We should also look at extending this
6113       //        optimization to non-pointer types.
6114       //
6115       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6116           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6117         continue;
6118 
6119       MinWidth = std::min(MinWidth,
6120                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6121       MaxWidth = std::max(MaxWidth,
6122                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6123     }
6124   }
6125 
6126   return {MinWidth, MaxWidth};
6127 }
6128 
6129 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6130                                                            unsigned LoopCost) {
6131   // -- The interleave heuristics --
6132   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6133   // There are many micro-architectural considerations that we can't predict
6134   // at this level. For example, frontend pressure (on decode or fetch) due to
6135   // code size, or the number and capabilities of the execution ports.
6136   //
6137   // We use the following heuristics to select the interleave count:
6138   // 1. If the code has reductions, then we interleave to break the cross
6139   // iteration dependency.
6140   // 2. If the loop is really small, then we interleave to reduce the loop
6141   // overhead.
6142   // 3. We don't interleave if we think that we will spill registers to memory
6143   // due to the increased register pressure.
6144 
6145   if (!isScalarEpilogueAllowed())
6146     return 1;
6147 
  // A finite maximum safe dependence distance already limited the VF;
  // interleaving would widen the effective access distance further, so do not
  // interleave.
6149   if (Legal->getMaxSafeDepDistBytes() != -1U)
6150     return 1;
6151 
6152   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6153   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6159   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6160       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6161     return 1;
6162 
6163   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so clamp each register class to at least
  // one local user to avoid dividing by zero.
6166   for (auto& pair : R.MaxLocalUsers) {
6167     pair.second = std::max(pair.second, 1U);
6168   }
6169 
6170   // We calculate the interleave count using the following formula.
6171   // Subtract the number of loop invariants from the number of available
6172   // registers. These registers are used by all of the interleaved instances.
6173   // Next, divide the remaining registers by the number of registers that is
6174   // required by the loop, in order to estimate how many parallel instances
6175   // fit without causing spills. All of this is rounded down if necessary to be
6176   // a power of two. We want power of two interleave count to simplify any
6177   // addressing operations or alignment considerations.
6178   // We also want power of two interleave counts to ensure that the induction
6179   // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when optimizing for size, in which case an
  // interleave count of 1 was already returned above.
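  // As an illustration (made-up numbers): with 32 registers in a class, 2 of
  // them holding loop-invariant values and at most 7 values live at once in
  // the loop, the estimate is PowerOf2Floor((32 - 2) / 7) = 4 interleaved
  // copies before spilling is expected.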
6181   unsigned IC = UINT_MAX;
6182 
6183   for (auto& pair : R.MaxLocalUsers) {
6184     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6188     if (VF.isScalar()) {
6189       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6190         TargetNumRegisters = ForceTargetNumScalarRegs;
6191     } else {
6192       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6193         TargetNumRegisters = ForceTargetNumVectorRegs;
6194     }
6195     unsigned MaxLocalUsers = pair.second;
6196     unsigned LoopInvariantRegs = 0;
6197     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6198       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6199 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6201     // Don't count the induction variable as interleaved.
6202     if (EnableIndVarRegisterHeur) {
6203       TmpIC =
6204           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6205                         std::max(1U, (MaxLocalUsers - 1)));
6206     }
6207 
6208     IC = std::min(IC, TmpIC);
6209   }
6210 
6211   // Clamp the interleave ranges to reasonable counts.
6212   unsigned MaxInterleaveCount =
6213       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6214 
6215   // Check if the user has overridden the max.
6216   if (VF.isScalar()) {
6217     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6218       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6219   } else {
6220     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6221       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6222   }
6223 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, provided that
  // bound is at least 1.
6227   //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
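  // For example, with an estimated trip count of 12 and VF = 4 the interleave
  // count is capped at 12 / 4 = 3, so the interleaved vector body can still
  // execute at least once for that estimate.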
6234   if (BestKnownTC) {
6235     MaxInterleaveCount =
6236         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6237     // Make sure MaxInterleaveCount is greater than 0.
6238     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6239   }
6240 
6241   assert(MaxInterleaveCount > 0 &&
6242          "Maximum interleave count must be greater than 0");
6243 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6246   if (IC > MaxInterleaveCount)
6247     IC = MaxInterleaveCount;
6248   else
6249     // Make sure IC is greater than 0.
6250     IC = std::max(1u, IC);
6251 
6252   assert(IC > 0 && "Interleave count must be greater than 0.");
6253 
6254   // If we did not calculate the cost for VF (because the user selected the VF)
6255   // then we calculate the cost of VF here.
6256   if (LoopCost == 0) {
6257     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6258     LoopCost = *expectedCost(VF).first.getValue();
6259   }
6260 
6261   assert(LoopCost && "Non-zero loop cost expected");
6262 
6263   // Interleave if we vectorized this loop and there is a reduction that could
6264   // benefit from interleaving.
6265   if (VF.isVector() && HasReductions) {
6266     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6267     return IC;
6268   }
6269 
6270   // Note that if we've already vectorized the loop we will have done the
6271   // runtime check and so interleaving won't require further checks.
6272   bool InterleavingRequiresRuntimePointerCheck =
6273       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6274 
6275   // We want to interleave small loops in order to reduce the loop overhead and
6276   // potentially expose ILP opportunities.
6277   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6278                     << "LV: IC is " << IC << '\n'
6279                     << "LV: VF is " << VF << '\n');
6280   const bool AggressivelyInterleaveReductions =
6281       TTI.enableAggressiveInterleaving(HasReductions);
6282   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the per-iteration loop overhead costs 1, and we use the
    // cost model's estimate of the loop body to interleave until that overhead
    // is about 5% of the cost of the interleaved loop body.
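    // For instance, assuming SmallLoopCost is at its usual default of 20 and
    // the loop body costs 5, SmallIC = PowerOf2Floor(20 / 5) = 4, so the
    // overhead of 1 is about 5% of the 4 * 5 = 20 units of interleaved work.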
6286     unsigned SmallIC =
6287         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6288 
6289     // Interleave until store/load ports (estimated by max interleave count) are
6290     // saturated.
6291     unsigned NumStores = Legal->getNumStores();
6292     unsigned NumLoads = Legal->getNumLoads();
6293     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6294     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
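    // E.g. (made-up numbers): with IC = 8, 4 stores and 2 loads per iteration,
    // StoresIC = 2 and LoadsIC = 4; if max(StoresIC, LoadsIC) exceeds SmallIC
    // we interleave by 4 below to keep the memory ports busy.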
6295 
6296     // If we have a scalar reduction (vector reductions are already dealt with
6297     // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2 so
    // the critical path only gets increased by one reduction operation.
6300     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6301       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6302       SmallIC = std::min(SmallIC, F);
6303       StoresIC = std::min(StoresIC, F);
6304       LoadsIC = std::min(LoadsIC, F);
6305     }
6306 
6307     if (EnableLoadStoreRuntimeInterleave &&
6308         std::max(StoresIC, LoadsIC) > SmallIC) {
6309       LLVM_DEBUG(
6310           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6311       return std::max(StoresIC, LoadsIC);
6312     }
6313 
6314     // If there are scalar reductions and TTI has enabled aggressive
6315     // interleaving for reductions, we will interleave to expose ILP.
6316     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6317         AggressivelyInterleaveReductions) {
6318       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to satisfy the rare situation when resources are too limited.
6321       return std::max(IC / 2, SmallIC);
6322     } else {
6323       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6324       return SmallIC;
6325     }
6326   }
6327 
6328   // Interleave if this is a large loop (small loops are already dealt with by
6329   // this point) that could benefit from interleaving.
6330   if (AggressivelyInterleaveReductions) {
6331     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6332     return IC;
6333   }
6334 
6335   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6336   return 1;
6337 }
6338 
6339 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6340 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6341   // This function calculates the register usage by measuring the highest number
6342   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in a topological order and
6344   // assign a number to each instruction. We use RPO to ensure that defs are
6345   // met before their users. We assume that each instruction that has in-loop
6346   // users starts an interval. We record every time that an in-loop value is
6347   // used, so we have a list of the first and last occurrences of each
6348   // instruction. Next, we transpose this data structure into a multi map that
6349   // holds the list of intervals that *end* at a specific location. This multi
6350   // map allows us to perform a linear search. We scan the instructions linearly
6351   // and record each time that a new interval starts, by placing it in a set.
6352   // If we find this value in the multi-map then we remove it from the set.
6353   // The max register usage is the maximum size of the set.
6354   // We also search for instructions that are defined outside the loop, but are
6355   // used inside the loop. We need this number separately from the max-interval
6356   // usage number because when we unroll, loop-invariant values do not take
  // more registers.
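  // As a small illustration: in a block 'a = load; b = load; c = add a, b;
  // store c', both loads are still live when the add is reached, so two
  // intervals are open at once and that class's estimate is 2 registers.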
6358   LoopBlocksDFS DFS(TheLoop);
6359   DFS.perform(LI);
6360 
6361   RegisterUsage RU;
6362 
6363   // Each 'key' in the map opens a new interval. The values
6364   // of the map are the index of the 'last seen' usage of the
6365   // instruction that is the key.
6366   using IntervalMap = DenseMap<Instruction *, unsigned>;
6367 
6368   // Maps instruction to its index.
6369   SmallVector<Instruction *, 64> IdxToInstr;
6370   // Marks the end of each interval.
6371   IntervalMap EndPoint;
6372   // Saves the list of instruction indices that are used in the loop.
6373   SmallPtrSet<Instruction *, 8> Ends;
6374   // Saves the list of values that are used in the loop but are
6375   // defined outside the loop, such as arguments and constants.
6376   SmallPtrSet<Value *, 8> LoopInvariants;
6377 
6378   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6379     for (Instruction &I : BB->instructionsWithoutDebug()) {
6380       IdxToInstr.push_back(&I);
6381 
6382       // Save the end location of each USE.
6383       for (Value *U : I.operands()) {
6384         auto *Instr = dyn_cast<Instruction>(U);
6385 
6386         // Ignore non-instruction values such as arguments, constants, etc.
6387         if (!Instr)
6388           continue;
6389 
6390         // If this instruction is outside the loop then record it and continue.
6391         if (!TheLoop->contains(Instr)) {
6392           LoopInvariants.insert(Instr);
6393           continue;
6394         }
6395 
6396         // Overwrite previous end points.
6397         EndPoint[Instr] = IdxToInstr.size();
6398         Ends.insert(Instr);
6399       }
6400     }
6401   }
6402 
6403   // Saves the list of intervals that end with the index in 'key'.
6404   using InstrList = SmallVector<Instruction *, 2>;
6405   DenseMap<unsigned, InstrList> TransposeEnds;
6406 
6407   // Transpose the EndPoints to a list of values that end at each index.
6408   for (auto &Interval : EndPoint)
6409     TransposeEnds[Interval.second].push_back(Interval.first);
6410 
6411   SmallPtrSet<Instruction *, 8> OpenIntervals;
6412   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6413   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6414 
6415   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6416 
6417   // A lambda that gets the register usage for the given type and VF.
6418   const auto &TTICapture = TTI;
6419   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6420     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6421       return 0U;
6422     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6423   };
6424 
6425   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6426     Instruction *I = IdxToInstr[i];
6427 
6428     // Remove all of the instructions that end at this location.
6429     InstrList &List = TransposeEnds[i];
6430     for (Instruction *ToRemove : List)
6431       OpenIntervals.erase(ToRemove);
6432 
6433     // Ignore instructions that are never used within the loop.
6434     if (!Ends.count(I))
6435       continue;
6436 
6437     // Skip ignored values.
6438     if (ValuesToIgnore.count(I))
6439       continue;
6440 
6441     // For each VF find the maximum usage of registers.
6442     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6443       // Count the number of live intervals.
6444       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6445 
6446       if (VFs[j].isScalar()) {
6447         for (auto Inst : OpenIntervals) {
6448           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6449           if (RegUsage.find(ClassID) == RegUsage.end())
6450             RegUsage[ClassID] = 1;
6451           else
6452             RegUsage[ClassID] += 1;
6453         }
6454       } else {
6455         collectUniformsAndScalars(VFs[j]);
6456         for (auto Inst : OpenIntervals) {
6457           // Skip ignored values for VF > 1.
6458           if (VecValuesToIgnore.count(Inst))
6459             continue;
6460           if (isScalarAfterVectorization(Inst, VFs[j])) {
6461             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6462             if (RegUsage.find(ClassID) == RegUsage.end())
6463               RegUsage[ClassID] = 1;
6464             else
6465               RegUsage[ClassID] += 1;
6466           } else {
6467             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6468             if (RegUsage.find(ClassID) == RegUsage.end())
6469               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6470             else
6471               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6472           }
6473         }
6474       }
6475 
6476       for (auto& pair : RegUsage) {
6477         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6478           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6479         else
6480           MaxUsages[j][pair.first] = pair.second;
6481       }
6482     }
6483 
6484     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6485                       << OpenIntervals.size() << '\n');
6486 
6487     // Add the current instruction to the list of open intervals.
6488     OpenIntervals.insert(I);
6489   }
6490 
6491   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6492     SmallMapVector<unsigned, unsigned, 4> Invariant;
6493 
6494     for (auto Inst : LoopInvariants) {
6495       unsigned Usage =
6496           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6497       unsigned ClassID =
6498           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6499       if (Invariant.find(ClassID) == Invariant.end())
6500         Invariant[ClassID] = Usage;
6501       else
6502         Invariant[ClassID] += Usage;
6503     }
6504 
6505     LLVM_DEBUG({
6506       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6507       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6508              << " item\n";
6509       for (const auto &pair : MaxUsages[i]) {
6510         dbgs() << "LV(REG): RegisterClass: "
6511                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6512                << " registers\n";
6513       }
6514       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6515              << " item\n";
6516       for (const auto &pair : Invariant) {
6517         dbgs() << "LV(REG): RegisterClass: "
6518                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6519                << " registers\n";
6520       }
6521     });
6522 
6523     RU.LoopInvariantRegs = Invariant;
6524     RU.MaxLocalUsers = MaxUsages[i];
6525     RUs[i] = RU;
6526   }
6527 
6528   return RUs;
6529 }
6530 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
6532   // TODO: Cost model for emulated masked load/store is completely
6533   // broken. This hack guides the cost model to use an artificially
6534   // high enough value to practically disable vectorization with such
6535   // operations, except where previously deployed legality hack allowed
6536   // using very low cost values. This is to avoid regressions coming simply
6537   // from moving "masked load/store" check from legality to cost model.
6538   // Masked Load/Gather emulation was previously never allowed.
  // Only a limited amount of Masked Store/Scatter emulation was allowed.
6540   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
6541   return isa<LoadInst>(I) ||
6542          (isa<StoreInst>(I) &&
6543           NumPredStores > NumberOfStoresToPredicate);
6544 }
6545 
6546 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6547   // If we aren't vectorizing the loop, or if we've already collected the
6548   // instructions to scalarize, there's nothing to do. Collection may already
6549   // have occurred if we have a user-selected VF and are now computing the
6550   // expected cost for interleaving.
6551   if (VF.isScalar() || VF.isZero() ||
6552       InstsToScalarize.find(VF) != InstsToScalarize.end())
6553     return;
6554 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6556   // not profitable to scalarize any instructions, the presence of VF in the
6557   // map will indicate that we've analyzed it already.
6558   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6559 
6560   // Find all the instructions that are scalar with predication in the loop and
6561   // determine if it would be better to not if-convert the blocks they are in.
6562   // If so, we also record the instructions to scalarize.
6563   for (BasicBlock *BB : TheLoop->blocks()) {
6564     if (!blockNeedsPredication(BB))
6565       continue;
6566     for (Instruction &I : *BB)
6567       if (isScalarWithPredication(&I)) {
6568         ScalarCostsTy ScalarCosts;
6569         // Do not apply discount logic if hacked cost is needed
6570         // for emulated masked memrefs.
6571         if (!useEmulatedMaskMemRefHack(&I) &&
6572             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6573           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6574         // Remember that BB will remain after vectorization.
6575         PredicatedBBsAfterVectorization.insert(BB);
6576       }
6577   }
6578 }
6579 
6580 int LoopVectorizationCostModel::computePredInstDiscount(
6581     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6582   assert(!isUniformAfterVectorization(PredInst, VF) &&
6583          "Instruction marked uniform-after-vectorization will be predicated");
6584 
6585   // Initialize the discount to zero, meaning that the scalar version and the
6586   // vector version cost the same.
6587   InstructionCost Discount = 0;
6588 
6589   // Holds instructions to analyze. The instructions we visit are mapped in
6590   // ScalarCosts. Those instructions are the ones that would be scalarized if
6591   // we find that the scalar version costs less.
6592   SmallVector<Instruction *, 8> Worklist;
6593 
6594   // Returns true if the given instruction can be scalarized.
6595   auto canBeScalarized = [&](Instruction *I) -> bool {
6596     // We only attempt to scalarize instructions forming a single-use chain
6597     // from the original predicated block that would otherwise be vectorized.
6598     // Although not strictly necessary, we give up on instructions we know will
6599     // already be scalar to avoid traversing chains that are unlikely to be
6600     // beneficial.
6601     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6602         isScalarAfterVectorization(I, VF))
6603       return false;
6604 
6605     // If the instruction is scalar with predication, it will be analyzed
6606     // separately. We ignore it within the context of PredInst.
6607     if (isScalarWithPredication(I))
6608       return false;
6609 
6610     // If any of the instruction's operands are uniform after vectorization,
6611     // the instruction cannot be scalarized. This prevents, for example, a
6612     // masked load from being scalarized.
6613     //
6614     // We assume we will only emit a value for lane zero of an instruction
6615     // marked uniform after vectorization, rather than VF identical values.
6616     // Thus, if we scalarize an instruction that uses a uniform, we would
6617     // create uses of values corresponding to the lanes we aren't emitting code
6618     // for. This behavior can be changed by allowing getScalarValue to clone
6619     // the lane zero values for uniforms rather than asserting.
6620     for (Use &U : I->operands())
6621       if (auto *J = dyn_cast<Instruction>(U.get()))
6622         if (isUniformAfterVectorization(J, VF))
6623           return false;
6624 
6625     // Otherwise, we can scalarize the instruction.
6626     return true;
6627   };
6628 
6629   // Compute the expected cost discount from scalarizing the entire expression
6630   // feeding the predicated instruction. We currently only consider expressions
6631   // that are single-use instruction chains.
6632   Worklist.push_back(PredInst);
6633   while (!Worklist.empty()) {
6634     Instruction *I = Worklist.pop_back_val();
6635 
6636     // If we've already analyzed the instruction, there's nothing to do.
6637     if (ScalarCosts.find(I) != ScalarCosts.end())
6638       continue;
6639 
6640     // Compute the cost of the vector instruction. Note that this cost already
6641     // includes the scalarization overhead of the predicated instruction.
6642     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6643 
6644     // Compute the cost of the scalarized instruction. This cost is the cost of
6645     // the instruction as if it wasn't if-converted and instead remained in the
6646     // predicated block. We will scale this cost by block probability after
6647     // computing the scalarization overhead.
6648     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6649     InstructionCost ScalarCost =
6650         VF.getKnownMinValue() *
6651         getInstructionCost(I, ElementCount::getFixed(1)).first;
6652 
6653     // Compute the scalarization overhead of needed insertelement instructions
6654     // and phi nodes.
6655     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6656       ScalarCost += TTI.getScalarizationOverhead(
6657           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6658           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6659       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6660       ScalarCost +=
6661           VF.getKnownMinValue() *
6662           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6663     }
6664 
6665     // Compute the scalarization overhead of needed extractelement
6666     // instructions. For each of the instruction's operands, if the operand can
6667     // be scalarized, add it to the worklist; otherwise, account for the
6668     // overhead.
6669     for (Use &U : I->operands())
6670       if (auto *J = dyn_cast<Instruction>(U.get())) {
6671         assert(VectorType::isValidElementType(J->getType()) &&
6672                "Instruction has non-scalar type");
6673         if (canBeScalarized(J))
6674           Worklist.push_back(J);
6675         else if (needsExtract(J, VF)) {
6676           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6677           ScalarCost += TTI.getScalarizationOverhead(
6678               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6679               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6680         }
6681       }
6682 
6683     // Scale the total scalar cost by block probability.
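    // getReciprocalPredBlockProb() currently returns a fixed 2, i.e. we assume
    // a predicated block executes on roughly half of the iterations.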
6684     ScalarCost /= getReciprocalPredBlockProb();
6685 
6686     // Compute the discount. A non-negative discount means the vector version
6687     // of the instruction costs more, and scalarizing would be beneficial.
6688     Discount += VectorCost - ScalarCost;
6689     ScalarCosts[I] = ScalarCost;
6690   }
6691 
6692   return *Discount.getValue();
6693 }
6694 
6695 LoopVectorizationCostModel::VectorizationCostTy
6696 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6697   VectorizationCostTy Cost;
6698 
6699   // For each block.
6700   for (BasicBlock *BB : TheLoop->blocks()) {
6701     VectorizationCostTy BlockCost;
6702 
6703     // For each instruction in the old loop.
6704     for (Instruction &I : BB->instructionsWithoutDebug()) {
6705       // Skip ignored values.
6706       if (ValuesToIgnore.count(&I) ||
6707           (VF.isVector() && VecValuesToIgnore.count(&I)))
6708         continue;
6709 
6710       VectorizationCostTy C = getInstructionCost(&I, VF);
6711 
6712       // Check if we should override the cost.
6713       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6714         C.first = InstructionCost(ForceTargetInstructionCost);
6715 
6716       BlockCost.first += C.first;
6717       BlockCost.second |= C.second;
6718       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6719                         << " for VF " << VF << " For instruction: " << I
6720                         << '\n');
6721     }
6722 
6723     // If we are vectorizing a predicated block, it will have been
6724     // if-converted. This means that the block's instructions (aside from
6725     // stores and instructions that may divide by zero) will now be
6726     // unconditionally executed. For the scalar case, we may not always execute
6727     // the predicated block, if it is an if-else block. Thus, scale the block's
6728     // cost by the probability of executing it. blockNeedsPredication from
6729     // Legal is used so as to not include all blocks in tail folded loops.
6730     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6731       BlockCost.first /= getReciprocalPredBlockProb();
6732 
6733     Cost.first += BlockCost.first;
6734     Cost.second |= BlockCost.second;
6735   }
6736 
6737   return Cost;
6738 }
6739 
6740 /// Gets Address Access SCEV after verifying that the access pattern
6741 /// is loop invariant except the induction variable dependence.
6742 ///
6743 /// This SCEV can be sent to the Target in order to estimate the address
6744 /// calculation cost.
static const SCEV *
getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal,
                     PredicatedScalarEvolution &PSE, const Loop *TheLoop) {
6751   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6752   if (!Gep)
6753     return nullptr;
6754 
6755   // We are looking for a gep with all loop invariant indices except for one
6756   // which should be an induction variable.
6757   auto SE = PSE.getSE();
6758   unsigned NumOperands = Gep->getNumOperands();
6759   for (unsigned i = 1; i < NumOperands; ++i) {
6760     Value *Opd = Gep->getOperand(i);
6761     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6762         !Legal->isInductionVariable(Opd))
6763       return nullptr;
6764   }
6765 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6767   return PSE.getSCEV(Ptr);
6768 }
6769 
6770 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6771   return Legal->hasStride(I->getOperand(0)) ||
6772          Legal->hasStride(I->getOperand(1));
6773 }
6774 
6775 InstructionCost
6776 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6777                                                         ElementCount VF) {
6778   assert(VF.isVector() &&
6779          "Scalarization cost of instruction implies vectorization.");
6780   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6781   Type *ValTy = getMemInstValueType(I);
6782   auto SE = PSE.getSE();
6783 
6784   unsigned AS = getLoadStoreAddressSpace(I);
6785   Value *Ptr = getLoadStorePointerOperand(I);
6786   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6787 
  // Figure out whether the access is strided, and get the stride value if it
  // is known at compile time.
6790   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6791 
6792   // Get the cost of the scalar memory instruction and address computation.
6793   InstructionCost Cost =
6794       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6795 
6796   // Don't pass *I here, since it is scalar but will actually be part of a
6797   // vectorized loop where the user of it is a vectorized instruction.
6798   const Align Alignment = getLoadStoreAlignment(I);
6799   Cost += VF.getKnownMinValue() *
6800           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6801                               AS, TTI::TCK_RecipThroughput);
6802 
6803   // Get the overhead of the extractelement and insertelement instructions
6804   // we might create due to scalarization.
6805   Cost += getScalarizationOverhead(I, VF);
6806 
6807   // If we have a predicated load/store, it will need extra i1 extracts and
6808   // conditional branches, but may not be executed for each vector lane. Scale
6809   // the cost by the probability of executing the predicated block.
6810   if (isPredicatedInst(I)) {
6811     Cost /= getReciprocalPredBlockProb();
6812 
6813     // Add the cost of an i1 extract and a branch
6814     auto *Vec_i1Ty =
6815         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6816     Cost += TTI.getScalarizationOverhead(
6817         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
6818         /*Insert=*/false, /*Extract=*/true);
6819     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6820 
6821     if (useEmulatedMaskMemRefHack(I))
6822       // Artificially setting to a high enough value to practically disable
6823       // vectorization with such operations.
6824       Cost = 3000000;
6825   }
6826 
6827   return Cost;
6828 }
6829 
6830 InstructionCost
6831 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6832                                                     ElementCount VF) {
6833   Type *ValTy = getMemInstValueType(I);
6834   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6835   Value *Ptr = getLoadStorePointerOperand(I);
6836   unsigned AS = getLoadStoreAddressSpace(I);
6837   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6838   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6839 
6840   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6841          "Stride should be 1 or -1 for consecutive memory access");
6842   const Align Alignment = getLoadStoreAlignment(I);
6843   InstructionCost Cost = 0;
6844   if (Legal->isMaskRequired(I))
6845     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6846                                       CostKind);
6847   else
6848     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6849                                 CostKind, I);
6850 
6851   bool Reverse = ConsecutiveStride < 0;
6852   if (Reverse)
6853     Cost +=
6854         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6855   return Cost;
6856 }
6857 
6858 InstructionCost
6859 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6860                                                 ElementCount VF) {
6861   assert(Legal->isUniformMemOp(*I));
6862 
6863   Type *ValTy = getMemInstValueType(I);
6864   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6865   const Align Alignment = getLoadStoreAlignment(I);
6866   unsigned AS = getLoadStoreAddressSpace(I);
6867   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6868   if (isa<LoadInst>(I)) {
6869     return TTI.getAddressComputationCost(ValTy) +
6870            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6871                                CostKind) +
6872            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6873   }
6874   StoreInst *SI = cast<StoreInst>(I);
6875 
6876   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6877   return TTI.getAddressComputationCost(ValTy) +
6878          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6879                              CostKind) +
6880          (isLoopInvariantStoreValue
6881               ? 0
6882               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6883                                        VF.getKnownMinValue() - 1));
6884 }
6885 
6886 InstructionCost
6887 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6888                                                  ElementCount VF) {
6889   Type *ValTy = getMemInstValueType(I);
6890   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6891   const Align Alignment = getLoadStoreAlignment(I);
6892   const Value *Ptr = getLoadStorePointerOperand(I);
6893 
6894   return TTI.getAddressComputationCost(VectorTy) +
6895          TTI.getGatherScatterOpCost(
6896              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6897              TargetTransformInfo::TCK_RecipThroughput, I);
6898 }
6899 
6900 InstructionCost
6901 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6902                                                    ElementCount VF) {
6903   // TODO: Once we have support for interleaving with scalable vectors
6904   // we can calculate the cost properly here.
6905   if (VF.isScalable())
6906     return InstructionCost::getInvalid();
6907 
6908   Type *ValTy = getMemInstValueType(I);
6909   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6910   unsigned AS = getLoadStoreAddressSpace(I);
6911 
6912   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6914 
6915   unsigned InterleaveFactor = Group->getFactor();
6916   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6917 
6918   // Holds the indices of existing members in an interleaved load group.
6919   // An interleaved store group doesn't need this as it doesn't allow gaps.
6920   SmallVector<unsigned, 4> Indices;
6921   if (isa<LoadInst>(I)) {
6922     for (unsigned i = 0; i < InterleaveFactor; i++)
6923       if (Group->getMember(i))
6924         Indices.push_back(i);
6925   }
6926 
6927   // Calculate the cost of the whole interleaved group.
6928   bool UseMaskForGaps =
6929       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6930   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6931       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6932       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6933 
6934   if (Group->isReverse()) {
6935     // TODO: Add support for reversed masked interleaved access.
6936     assert(!Legal->isMaskRequired(I) &&
6937            "Reverse masked interleaved access not supported.");
6938     Cost +=
6939         Group->getNumMembers() *
6940         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6941   }
6942   return Cost;
6943 }
6944 
6945 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
6946     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6947   // Early exit for no inloop reductions
6948   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6949     return InstructionCost::getInvalid();
6950   auto *VectorTy = cast<VectorType>(Ty);
6951 
6952   // We are looking for a pattern of, and finding the minimal acceptable cost:
6953   //  reduce(mul(ext(A), ext(B))) or
6954   //  reduce(mul(A, B)) or
6955   //  reduce(ext(A)) or
6956   //  reduce(A).
6957   // The basic idea is that we walk down the tree to do that, finding the root
6958   // reduction instruction in InLoopReductionImmediateChains. From there we find
6959   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6960   // of the components. If the reduction cost is lower then we return it for the
6961   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying the original cost method
6963   // should be used.
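  // For instance, an in-loop reduction built from
  // 'sum += (int)a[i] * (int)b[i]' over narrow inputs reaches here as
  // reduce(mul(sext(A), sext(B))); targets with an extended multiply-accumulate
  // reduction can report a combined cost that beats pricing the sext, mul and
  // reduce separately.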
6964   Instruction *RetI = I;
6965   if ((RetI->getOpcode() == Instruction::SExt ||
6966        RetI->getOpcode() == Instruction::ZExt)) {
6967     if (!RetI->hasOneUser())
6968       return InstructionCost::getInvalid();
6969     RetI = RetI->user_back();
6970   }
6971   if (RetI->getOpcode() == Instruction::Mul &&
6972       RetI->user_back()->getOpcode() == Instruction::Add) {
6973     if (!RetI->hasOneUser())
6974       return InstructionCost::getInvalid();
6975     RetI = RetI->user_back();
6976   }
6977 
6978   // Test if the found instruction is a reduction, and if not return an invalid
6979   // cost specifying the parent to use the original cost modelling.
6980   if (!InLoopReductionImmediateChains.count(RetI))
6981     return InstructionCost::getInvalid();
6982 
6983   // Find the reduction this chain is a part of and calculate the basic cost of
6984   // the reduction on its own.
6985   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6986   Instruction *ReductionPhi = LastChain;
6987   while (!isa<PHINode>(ReductionPhi))
6988     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6989 
6990   RecurrenceDescriptor RdxDesc =
6991       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
6992   unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(),
6993                                                      VectorTy, false, CostKind);
6994 
6995   // Get the operand that was not the reduction chain and match it to one of the
6996   // patterns, returning the better cost if it is found.
6997   Instruction *RedOp = RetI->getOperand(1) == LastChain
6998                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6999                            : dyn_cast<Instruction>(RetI->getOperand(1));
7000 
7001   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7002 
7003   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7004       !TheLoop->isLoopInvariant(RedOp)) {
7005     bool IsUnsigned = isa<ZExtInst>(RedOp);
7006     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7007     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7008         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7009         CostKind);
7010 
7011     unsigned ExtCost =
7012         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7013                              TTI::CastContextHint::None, CostKind, RedOp);
7014     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7015       return I == RetI ? *RedCost.getValue() : 0;
7016   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7017     Instruction *Mul = RedOp;
7018     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7019     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7020     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7021         Op0->getOpcode() == Op1->getOpcode() &&
7022         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7023         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7024       bool IsUnsigned = isa<ZExtInst>(Op0);
7025       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7026       // reduce(mul(ext, ext))
7027       unsigned ExtCost =
7028           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7029                                TTI::CastContextHint::None, CostKind, Op0);
7030       InstructionCost MulCost =
7031           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7032 
7033       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7034           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7035           CostKind);
7036 
7037       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7038         return I == RetI ? *RedCost.getValue() : 0;
7039     } else {
7040       InstructionCost MulCost =
7041           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7042 
7043       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7044           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7045           CostKind);
7046 
7047       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7048         return I == RetI ? *RedCost.getValue() : 0;
7049     }
7050   }
7051 
7052   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7053 }
7054 
7055 InstructionCost
7056 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7057                                                      ElementCount VF) {
7058   // Calculate scalar cost only. Vectorization cost should be ready at this
7059   // moment.
7060   if (VF.isScalar()) {
7061     Type *ValTy = getMemInstValueType(I);
7062     const Align Alignment = getLoadStoreAlignment(I);
7063     unsigned AS = getLoadStoreAddressSpace(I);
7064 
7065     return TTI.getAddressComputationCost(ValTy) +
7066            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7067                                TTI::TCK_RecipThroughput, I);
7068   }
7069   return getWideningCost(I, VF);
7070 }
7071 
7072 LoopVectorizationCostModel::VectorizationCostTy
7073 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7074                                                ElementCount VF) {
7075   // If we know that this instruction will remain uniform, check the cost of
7076   // the scalar version.
7077   if (isUniformAfterVectorization(I, VF))
7078     VF = ElementCount::getFixed(1);
7079 
7080   if (VF.isVector() && isProfitableToScalarize(I, VF))
7081     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7082 
7083   // Forced scalars do not have any scalarization overhead.
7084   auto ForcedScalar = ForcedScalars.find(VF);
7085   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7086     auto InstSet = ForcedScalar->second;
7087     if (InstSet.count(I))
7088       return VectorizationCostTy(
7089           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7090            VF.getKnownMinValue()),
7091           false);
7092   }
7093 
7094   Type *VectorTy;
7095   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7096 
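  // The result is considered "not scalarized" when the target holds the vector
  // type in fewer register parts than there are lanes, i.e. the instruction is
  // genuinely widened rather than replicated once per lane.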
7097   bool TypeNotScalarized =
7098       VF.isVector() && VectorTy->isVectorTy() &&
7099       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7100   return VectorizationCostTy(C, TypeNotScalarized);
7101 }
7102 
7103 InstructionCost
7104 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7105                                                      ElementCount VF) const {
7106 
7107   if (VF.isScalable())
7108     return InstructionCost::getInvalid();
7109 
7110   if (VF.isScalar())
7111     return 0;
7112 
7113   InstructionCost Cost = 0;
7114   Type *RetTy = ToVectorTy(I->getType(), VF);
7115   if (!RetTy->isVoidTy() &&
7116       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7117     Cost += TTI.getScalarizationOverhead(
7118         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7119         true, false);
7120 
7121   // Some targets keep addresses scalar.
7122   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7123     return Cost;
7124 
7125   // Some targets support efficient element stores.
7126   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7127     return Cost;
7128 
7129   // Collect operands to consider.
7130   CallInst *CI = dyn_cast<CallInst>(I);
7131   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7132 
7133   // Skip operands that do not require extraction/scalarization and do not incur
7134   // any overhead.
7135   SmallVector<Type *> Tys;
7136   for (auto *V : filterExtractingOperands(Ops, VF))
7137     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7138   return Cost + TTI.getOperandsScalarizationOverhead(
7139                     filterExtractingOperands(Ops, VF), Tys);
7140 }
7141 
7142 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7143   if (VF.isScalar())
7144     return;
7145   NumPredStores = 0;
7146   for (BasicBlock *BB : TheLoop->blocks()) {
7147     // For each instruction in the old loop.
7148     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7150       if (!Ptr)
7151         continue;
7152 
7153       // TODO: We should generate better code and update the cost model for
7154       // predicated uniform stores. Today they are treated as any other
7155       // predicated store (see added test cases in
7156       // invariant-store-vectorization.ll).
7157       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7158         NumPredStores++;
7159 
7160       if (Legal->isUniformMemOp(I)) {
7161         // TODO: Avoid replicating loads and stores instead of
7162         // relying on instcombine to remove them.
7163         // Load: Scalar load + broadcast
7164         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7165         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7166         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7167         continue;
7168       }
7169 
7170       // We assume that widening is the best solution when possible.
7171       if (memoryInstructionCanBeWidened(&I, VF)) {
7172         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7173         int ConsecutiveStride =
7174                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7175         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7176                "Expected consecutive stride.");
7177         InstWidening Decision =
7178             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7179         setWideningDecision(&I, VF, Decision, Cost);
7180         continue;
7181       }
7182 
7183       // Choose between Interleaving, Gather/Scatter or Scalarization.
7184       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7185       unsigned NumAccesses = 1;
7186       if (isAccessInterleaved(&I)) {
7187         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7189 
7190         // Make one decision for the whole group.
7191         if (getWideningDecision(&I, VF) != CM_Unknown)
7192           continue;
7193 
7194         NumAccesses = Group->getNumMembers();
7195         if (interleavedAccessCanBeWidened(&I, VF))
7196           InterleaveCost = getInterleaveGroupCost(&I, VF);
7197       }
7198 
7199       InstructionCost GatherScatterCost =
7200           isLegalGatherOrScatter(&I)
7201               ? getGatherScatterCost(&I, VF) * NumAccesses
7202               : InstructionCost::getInvalid();
7203 
7204       InstructionCost ScalarizationCost =
7205           !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
7206                            : InstructionCost::getInvalid();
7207 
7208       // Choose better solution for the current VF,
7209       // write down this decision and use it during vectorization.
7210       InstructionCost Cost;
7211       InstWidening Decision;
7212       if (InterleaveCost <= GatherScatterCost &&
7213           InterleaveCost < ScalarizationCost) {
7214         Decision = CM_Interleave;
7215         Cost = InterleaveCost;
7216       } else if (GatherScatterCost < ScalarizationCost) {
7217         Decision = CM_GatherScatter;
7218         Cost = GatherScatterCost;
7219       } else {
7220         assert(!VF.isScalable() &&
7221                "We cannot yet scalarise for scalable vectors");
7222         Decision = CM_Scalarize;
7223         Cost = ScalarizationCost;
7224       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
7228       if (auto Group = getInterleavedAccessGroup(&I))
7229         setWideningDecision(Group, VF, Decision, Cost);
7230       else
7231         setWideningDecision(&I, VF, Decision, Cost);
7232     }
7233   }
7234 
  // Make sure that any load of an address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
7237   // inevitable extracts into address registers, and also has the benefit of
7238   // activating LSR more, since that pass can't optimize vectorized
7239   // addresses.
7240   if (TTI.prefersVectorizedAddressing())
7241     return;
7242 
7243   // Start with all scalar pointer uses.
7244   SmallPtrSet<Instruction *, 8> AddrDefs;
7245   for (BasicBlock *BB : TheLoop->blocks())
7246     for (Instruction &I : *BB) {
7247       Instruction *PtrDef =
7248         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7249       if (PtrDef && TheLoop->contains(PtrDef) &&
7250           getWideningDecision(&I, VF) != CM_GatherScatter)
7251         AddrDefs.insert(PtrDef);
7252     }
7253 
7254   // Add all instructions used to generate the addresses.
7255   SmallVector<Instruction *, 4> Worklist;
7256   append_range(Worklist, AddrDefs);
7257   while (!Worklist.empty()) {
7258     Instruction *I = Worklist.pop_back_val();
7259     for (auto &Op : I->operands())
7260       if (auto *InstOp = dyn_cast<Instruction>(Op))
7261         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7262             AddrDefs.insert(InstOp).second)
7263           Worklist.push_back(InstOp);
7264   }
7265 
7266   for (auto *I : AddrDefs) {
7267     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by the
      // cost functions, but since this involves finding out whether the loaded
      // register is involved in an address computation, it is instead changed
      // here once we know this is the case.
7272       InstWidening Decision = getWideningDecision(I, VF);
7273       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7274         // Scalarize a widened load of address.
7275         setWideningDecision(
7276             I, VF, CM_Scalarize,
7277             (VF.getKnownMinValue() *
7278              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7279       else if (auto Group = getInterleavedAccessGroup(I)) {
7280         // Scalarize an interleave group of address loads.
7281         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7282           if (Instruction *Member = Group->getMember(I))
7283             setWideningDecision(
7284                 Member, VF, CM_Scalarize,
7285                 (VF.getKnownMinValue() *
7286                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7287         }
7288       }
7289     } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
7292       ForcedScalars[VF].insert(I);
7293   }
7294 }
7295 
7296 InstructionCost
7297 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7298                                                Type *&VectorTy) {
7299   Type *RetTy = I->getType();
7300   if (canTruncateToMinimalBitwidth(I, VF))
7301     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7302   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
7303   auto SE = PSE.getSE();
7304   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7305 
7306   // TODO: We need to estimate the cost of intrinsic calls.
7307   switch (I->getOpcode()) {
7308   case Instruction::GetElementPtr:
7309     // We mark this instruction as zero-cost because the cost of GEPs in
7310     // vectorized code depends on whether the corresponding memory instruction
7311     // is scalarized or not. Therefore, we handle GEPs with the memory
7312     // instruction cost.
7313     return 0;
7314   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
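    // For example, at VF=4 a scalarized predicated instruction gives rise to
    // four predicated blocks, and each guarding branch needs an extract of the
    // corresponding i1 mask element; the cost computed below models that
    // (illustrative).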
7318     bool ScalarPredicatedBB = false;
7319     BranchInst *BI = cast<BranchInst>(I);
7320     if (VF.isVector() && BI->isConditional() &&
7321         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7322          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7323       ScalarPredicatedBB = true;
7324 
7325     if (ScalarPredicatedBB) {
7326       // Return cost for branches around scalarized and predicated blocks.
7327       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7328       auto *Vec_i1Ty =
7329           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7330       return (TTI.getScalarizationOverhead(
7331                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7332                   false, true) +
7333               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7334                VF.getKnownMinValue()));
7335     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7336       // The back-edge branch will remain, as will all scalar branches.
7337       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7338     else
7339       // This branch will be eliminated by if-conversion.
7340       return 0;
7341     // Note: We currently assume zero cost for an unconditional branch inside
7342     // a predicated block since it will become a fall-through, although we
7343     // may decide in the future to call TTI for all branches.
7344   }
7345   case Instruction::PHI: {
7346     auto *Phi = cast<PHINode>(I);
7347 
7348     // First-order recurrences are replaced by vector shuffles inside the loop.
7349     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7350     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7351       return TTI.getShuffleCost(
7352           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7353           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7354 
7355     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7356     // converted into select instructions. We require N - 1 selects per phi
7357     // node, where N is the number of incoming values.
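    // For example, a non-header phi with three incoming values is expected to
    // become two selects.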
7358     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7359       return (Phi->getNumIncomingValues() - 1) *
7360              TTI.getCmpSelInstrCost(
7361                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7362                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7363                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7364 
7365     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7366   }
7367   case Instruction::UDiv:
7368   case Instruction::SDiv:
7369   case Instruction::URem:
7370   case Instruction::SRem:
7371     // If we have a predicated instruction, it may not be executed for each
7372     // vector lane. Get the scalarization cost and scale this amount by the
7373     // probability of executing the predicated block. If the instruction is not
7374     // predicated, we fall through to the next case.
7375     if (VF.isVector() && isScalarWithPredication(I)) {
7376       InstructionCost Cost = 0;
7377 
7378       // These instructions have a non-void type, so account for the phi nodes
7379       // that we will create. This cost is likely to be zero. The phi node
7380       // cost, if any, should be scaled by the block probability because it
7381       // models a copy at the end of each predicated block.
7382       Cost += VF.getKnownMinValue() *
7383               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7384 
7385       // The cost of the non-predicated instruction.
7386       Cost += VF.getKnownMinValue() *
7387               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7388 
7389       // The cost of insertelement and extractelement instructions needed for
7390       // scalarization.
7391       Cost += getScalarizationOverhead(I, VF);
7392 
7393       // Scale the cost by the probability of executing the predicated blocks.
7394       // This assumes the predicated block for each vector lane is equally
7395       // likely.
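      // For example, assuming getReciprocalPredBlockProb() returns 2 (i.e. each
      // predicated block is assumed to execute half the time), the cost
      // accumulated above is halved.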
7396       return Cost / getReciprocalPredBlockProb();
7397     }
7398     LLVM_FALLTHROUGH;
7399   case Instruction::Add:
7400   case Instruction::FAdd:
7401   case Instruction::Sub:
7402   case Instruction::FSub:
7403   case Instruction::Mul:
7404   case Instruction::FMul:
7405   case Instruction::FDiv:
7406   case Instruction::FRem:
7407   case Instruction::Shl:
7408   case Instruction::LShr:
7409   case Instruction::AShr:
7410   case Instruction::And:
7411   case Instruction::Or:
7412   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7414     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7415       return 0;
7416 
7417     // Detect reduction patterns
7418     InstructionCost RedCost;
7419     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7420             .isValid())
7421       return RedCost;
7422 
7423     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7425     Value *Op2 = I->getOperand(1);
7426     TargetTransformInfo::OperandValueProperties Op2VP;
7427     TargetTransformInfo::OperandValueKind Op2VK =
7428         TTI.getOperandInfo(Op2, Op2VP);
7429     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7430       Op2VK = TargetTransformInfo::OK_UniformValue;
7431 
7432     SmallVector<const Value *, 4> Operands(I->operand_values());
7433     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7434     return N * TTI.getArithmeticInstrCost(
7435                    I->getOpcode(), VectorTy, CostKind,
7436                    TargetTransformInfo::OK_AnyValue,
7437                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7438   }
7439   case Instruction::FNeg: {
7440     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
7441     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
7442     return N * TTI.getArithmeticInstrCost(
7443                    I->getOpcode(), VectorTy, CostKind,
7444                    TargetTransformInfo::OK_AnyValue,
7445                    TargetTransformInfo::OK_AnyValue,
7446                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
7447                    I->getOperand(0), I);
7448   }
7449   case Instruction::Select: {
7450     SelectInst *SI = cast<SelectInst>(I);
7451     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7452     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7453     Type *CondTy = SI->getCondition()->getType();
7454     if (!ScalarCond)
7455       CondTy = VectorType::get(CondTy, VF);
7456     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7457                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7458   }
7459   case Instruction::ICmp:
7460   case Instruction::FCmp: {
7461     Type *ValTy = I->getOperand(0)->getType();
7462     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7463     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7464       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7465     VectorTy = ToVectorTy(ValTy, VF);
7466     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7467                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7468   }
7469   case Instruction::Store:
7470   case Instruction::Load: {
7471     ElementCount Width = VF;
7472     if (Width.isVector()) {
7473       InstWidening Decision = getWideningDecision(I, Width);
7474       assert(Decision != CM_Unknown &&
7475              "CM decision should be taken at this point");
7476       if (Decision == CM_Scalarize)
7477         Width = ElementCount::getFixed(1);
7478     }
7479     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7480     return getMemoryInstructionCost(I, VF);
7481   }
7482   case Instruction::ZExt:
7483   case Instruction::SExt:
7484   case Instruction::FPToUI:
7485   case Instruction::FPToSI:
7486   case Instruction::FPExt:
7487   case Instruction::PtrToInt:
7488   case Instruction::IntToPtr:
7489   case Instruction::SIToFP:
7490   case Instruction::UIToFP:
7491   case Instruction::Trunc:
7492   case Instruction::FPTrunc:
7493   case Instruction::BitCast: {
7494     // Computes the CastContextHint from a Load/Store instruction.
7495     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7496       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7497              "Expected a load or a store!");
7498 
7499       if (VF.isScalar() || !TheLoop->contains(I))
7500         return TTI::CastContextHint::Normal;
7501 
7502       switch (getWideningDecision(I, VF)) {
7503       case LoopVectorizationCostModel::CM_GatherScatter:
7504         return TTI::CastContextHint::GatherScatter;
7505       case LoopVectorizationCostModel::CM_Interleave:
7506         return TTI::CastContextHint::Interleave;
7507       case LoopVectorizationCostModel::CM_Scalarize:
7508       case LoopVectorizationCostModel::CM_Widen:
7509         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7510                                         : TTI::CastContextHint::Normal;
7511       case LoopVectorizationCostModel::CM_Widen_Reverse:
7512         return TTI::CastContextHint::Reversed;
7513       case LoopVectorizationCostModel::CM_Unknown:
7514         llvm_unreachable("Instr did not go through cost modelling?");
7515       }
7516 
7517       llvm_unreachable("Unhandled case!");
7518     };
7519 
7520     unsigned Opcode = I->getOpcode();
7521     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7522     // For Trunc, the context is the only user, which must be a StoreInst.
7523     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7524       if (I->hasOneUse())
7525         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7526           CCH = ComputeCCH(Store);
7527     }
7528     // For Z/Sext, the context is the operand, which must be a LoadInst.
7529     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7530              Opcode == Instruction::FPExt) {
7531       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7532         CCH = ComputeCCH(Load);
7533     }
7534 
7535     // We optimize the truncation of induction variables having constant
7536     // integer steps. The cost of these truncations is the same as the scalar
7537     // operation.
7538     if (isOptimizableIVTruncate(I, VF)) {
7539       auto *Trunc = cast<TruncInst>(I);
7540       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7541                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7542     }
7543 
7544     // Detect reduction patterns
7545     InstructionCost RedCost;
7546     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7547             .isValid())
7548       return RedCost;
7549 
7550     Type *SrcScalarTy = I->getOperand(0)->getType();
7551     Type *SrcVecTy =
7552         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7553     if (canTruncateToMinimalBitwidth(I, VF)) {
7554       // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
7556       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7557       //
7558       // Calculate the modified src and dest types.
7559       Type *MinVecTy = VectorTy;
7560       if (Opcode == Instruction::Trunc) {
7561         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7562         VectorTy =
7563             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7564       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7565         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7566         VectorTy =
7567             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7568       }
7569     }
7570 
7571     unsigned N;
7572     if (isScalarAfterVectorization(I, VF)) {
7573       assert(!VF.isScalable() && "VF is assumed to be non scalable");
7574       N = VF.getKnownMinValue();
7575     } else
7576       N = 1;
7577     return N *
7578            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7579   }
7580   case Instruction::Call: {
7581     bool NeedToScalarize;
7582     CallInst *CI = cast<CallInst>(I);
7583     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7584     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7585       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7586       return std::min(CallCost, IntrinsicCost);
7587     }
7588     return CallCost;
7589   }
7590   case Instruction::ExtractValue:
7591     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7592   default:
7593     // The cost of executing VF copies of the scalar instruction. This opcode
7594     // is unknown. Assume that it is the same as 'mul'.
7595     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
7596                                        Instruction::Mul, VectorTy, CostKind) +
7597            getScalarizationOverhead(I, VF);
7598   } // end of switch.
7599 }
7600 
7601 char LoopVectorize::ID = 0;
7602 
7603 static const char lv_name[] = "Loop Vectorization";
7604 
7605 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7606 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7607 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7608 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7609 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7610 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7611 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7612 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7613 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7614 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7615 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7616 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7617 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7618 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7619 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7620 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7621 
7622 namespace llvm {
7623 
7624 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7625 
7626 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7627                               bool VectorizeOnlyWhenForced) {
7628   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7629 }
7630 
7631 } // end namespace llvm
7632 
7633 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7634   // Check if the pointer operand of a load or store instruction is
7635   // consecutive.
7636   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7637     return Legal->isConsecutivePtr(Ptr);
7638   return false;
7639 }
7640 
7641 void LoopVectorizationCostModel::collectValuesToIgnore() {
7642   // Ignore ephemeral values.
7643   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7644 
7645   // Ignore type-promoting instructions we identified during reduction
7646   // detection.
7647   for (auto &Reduction : Legal->getReductionVars()) {
7648     RecurrenceDescriptor &RedDes = Reduction.second;
7649     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7650     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7651   }
7652   // Ignore type-casting instructions we identified during induction
7653   // detection.
7654   for (auto &Induction : Legal->getInductionVars()) {
7655     InductionDescriptor &IndDes = Induction.second;
7656     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7657     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7658   }
7659 }
7660 
7661 void LoopVectorizationCostModel::collectInLoopReductions() {
7662   for (auto &Reduction : Legal->getReductionVars()) {
7663     PHINode *Phi = Reduction.first;
7664     RecurrenceDescriptor &RdxDesc = Reduction.second;
7665 
7666     // We don't collect reductions that are type promoted (yet).
7667     if (RdxDesc.getRecurrenceType() != Phi->getType())
7668       continue;
7669 
7670     // If the target would prefer this reduction to happen "in-loop", then we
7671     // want to record it as such.
7672     unsigned Opcode = RdxDesc.getOpcode();
7673     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7674         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7675                                    TargetTransformInfo::ReductionFlags()))
7676       continue;
7677 
7678     // Check that we can correctly put the reductions into the loop, by
7679     // finding the chain of operations that leads from the phi to the loop
7680     // exit value.
7681     SmallVector<Instruction *, 4> ReductionOperations =
7682         RdxDesc.getReductionOpChain(Phi, TheLoop);
7683     bool InLoop = !ReductionOperations.empty();
7684     if (InLoop) {
7685       InLoopReductionChains[Phi] = ReductionOperations;
7686       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7687       Instruction *LastChain = Phi;
7688       for (auto *I : ReductionOperations) {
7689         InLoopReductionImmediateChains[I] = LastChain;
7690         LastChain = I;
7691       }
7692     }
7693     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7694                       << " reduction for phi: " << *Phi << "\n");
7695   }
7696 }
7697 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan currently
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
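// For example, with 512-bit wide vector registers and a widest scalar type of
// 32 bits, determineVPlanVF below returns a VF of 16.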
7703 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7704                                  LoopVectorizationCostModel &CM) {
7705   unsigned WidestType;
7706   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7707   return WidestVectorRegBits / WidestType;
7708 }
7709 
7710 VectorizationFactor
7711 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7712   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7713   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
7718   if (!OrigLoop->isInnermost()) {
7719     // If the user doesn't provide a vectorization factor, determine a
7720     // reasonable one.
7721     if (UserVF.isZero()) {
7722       VF = ElementCount::getFixed(determineVPlanVF(
7723           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7724               .getFixedSize(),
7725           CM));
7726       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7727 
7728       // Make sure we have a VF > 1 for stress testing.
7729       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7730         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7731                           << "overriding computed VF.\n");
7732         VF = ElementCount::getFixed(4);
7733       }
7734     }
7735     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7736     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7737            "VF needs to be a power of two");
7738     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7739                       << "VF " << VF << " to build VPlans.\n");
7740     buildVPlans(VF, VF);
7741 
7742     // For VPlan build stress testing, we bail out after VPlan construction.
7743     if (VPlanBuildStressTest)
7744       return VectorizationFactor::Disabled();
7745 
7746     return {VF, 0 /*Cost*/};
7747   }
7748 
7749   LLVM_DEBUG(
7750       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7751                 "VPlan-native path.\n");
7752   return VectorizationFactor::Disabled();
7753 }
7754 
7755 Optional<VectorizationFactor>
7756 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7757   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7758   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
7760     return None;
7761 
7762   // Invalidate interleave groups if all blocks of loop will be predicated.
7763   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7764       !useMaskedInterleavedAccesses(*TTI)) {
7765     LLVM_DEBUG(
7766         dbgs()
7767         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7768            "which requires masked-interleaved support.\n");
7769     if (CM.InterleaveInfo.invalidateGroups())
7770       // Invalidating interleave groups also requires invalidating all decisions
7771       // based on them, which includes widening decisions and uniform and scalar
7772       // values.
7773       CM.invalidateCostModelingDecisions();
7774   }
7775 
7776   ElementCount MaxVF = MaybeMaxVF.getValue();
7777   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7778 
7779   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7780   if (!UserVF.isZero() &&
7781       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
    // VFs here; this should be reverted to only use legal UserVFs once the
    // loop below supports scalable VFs.
7785     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7786     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7787                       << " VF " << VF << ".\n");
7788     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7789            "VF needs to be a power of two");
7790     // Collect the instructions (and their associated costs) that will be more
7791     // profitable to scalarize.
7792     CM.selectUserVectorizationFactor(VF);
7793     CM.collectInLoopReductions();
7794     buildVPlansWithVPRecipes(VF, VF);
7795     LLVM_DEBUG(printPlans(dbgs()));
7796     return {{VF, 0}};
7797   }
7798 
7799   assert(!MaxVF.isScalable() &&
7800          "Scalable vectors not yet supported beyond this point");
7801 
7802   for (ElementCount VF = ElementCount::getFixed(1);
7803        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7804     // Collect Uniform and Scalar instructions after vectorization with VF.
7805     CM.collectUniformsAndScalars(VF);
7806 
7807     // Collect the instructions (and their associated costs) that will be more
7808     // profitable to scalarize.
7809     if (VF.isVector())
7810       CM.collectInstsToScalarize(VF);
7811   }
7812 
7813   CM.collectInLoopReductions();
7814 
7815   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7816   LLVM_DEBUG(printPlans(dbgs()));
7817   if (MaxVF.isScalar())
7818     return VectorizationFactor::Disabled();
7819 
7820   // Select the optimal vectorization factor.
7821   auto SelectedVF = CM.selectVectorizationFactor(MaxVF);
7822 
7823   // Check if it is profitable to vectorize with runtime checks.
7824   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7825   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7826     bool PragmaThresholdReached =
7827         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7828     bool ThresholdReached =
7829         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7830     if ((ThresholdReached && !Hints.allowReordering()) ||
7831         PragmaThresholdReached) {
7832       ORE->emit([&]() {
7833         return OptimizationRemarkAnalysisAliasing(
7834                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7835                    OrigLoop->getHeader())
7836                << "loop not vectorized: cannot prove it is safe to reorder "
7837                   "memory operations";
7838       });
7839       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7840       Hints.emitRemarkWithHints();
7841       return VectorizationFactor::Disabled();
7842     }
7843   }
7844   return SelectedVF;
7845 }
7846 
7847 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7848   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7849                     << '\n');
7850   BestVF = VF;
7851   BestUF = UF;
7852 
7853   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7854     return !Plan->hasVF(VF);
7855   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7857 }
7858 
7859 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7860                                            DominatorTree *DT) {
7861   // Perform the actual loop transformation.
7862 
7863   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7864   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7865   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7866 
7867   VPTransformState State{
7868       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
7869   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7870   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7871   State.CanonicalIV = ILV.Induction;
7872 
7873   ILV.printDebugTracesAtStart();
7874 
7875   //===------------------------------------------------===//
7876   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
7880   //
7881   //===------------------------------------------------===//
7882 
7883   // 2. Copy and widen instructions from the old loop into the new loop.
7884   VPlans.front()->execute(&State);
7885 
7886   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7887   //    predication, updating analyses.
7888   ILV.fixVectorizedLoop(State);
7889 
7890   ILV.printDebugTracesAtEnd();
7891 }
7892 
7893 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7894 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7895   for (const auto &Plan : VPlans)
7896     if (PrintVPlansInDotFormat)
7897       Plan->printDOT(O);
7898     else
7899       Plan->print(O);
7900 }
7901 #endif
7902 
7903 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7904     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7905 
  // We create new control-flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
7909   SmallVector<BasicBlock*> ExitingBlocks;
7910   OrigLoop->getExitingBlocks(ExitingBlocks);
7911   for (auto *BB : ExitingBlocks) {
7912     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7913     if (!Cmp || !Cmp->hasOneUse())
7914       continue;
7915 
7916     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7917     if (!DeadInstructions.insert(Cmp).second)
7918       continue;
7919 
    // The operands of the icmp are often a dead trunc, used by IndUpdate.
7921     // TODO: can recurse through operands in general
7922     for (Value *Op : Cmp->operands()) {
7923       if (isa<TruncInst>(Op) && Op->hasOneUse())
7924           DeadInstructions.insert(cast<Instruction>(Op));
7925     }
7926   }
7927 
7928   // We create new "steps" for induction variable updates to which the original
7929   // induction variables map. An original update instruction will be dead if
7930   // all its users except the induction variable are dead.
7931   auto *Latch = OrigLoop->getLoopLatch();
7932   for (auto &Induction : Legal->getInductionVars()) {
7933     PHINode *Ind = Induction.first;
7934     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7935 
7936     // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7938     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7939       continue;
7940 
7941     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7942           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7943         }))
7944       DeadInstructions.insert(IndUpdate);
7945 
7946     // We record as "Dead" also the type-casting instructions we had identified
7947     // during induction analysis. We don't need any handling for them in the
7948     // vectorized loop because we have proven that, under a proper runtime
7949     // test guarding the vectorized loop, the value of the phi, and the casted
7950     // value of the phi, are the same. The last instruction in this casting chain
7951     // will get its scalar/vector/widened def from the scalar/vector/widened def
7952     // of the respective phi node. Any other casts in the induction def-use chain
7953     // have no other uses outside the phi update chain, and will be ignored.
7954     InductionDescriptor &IndDes = Induction.second;
7955     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7956     DeadInstructions.insert(Casts.begin(), Casts.end());
7957   }
7958 }
7959 
7960 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7961 
7962 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7963 
7964 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7965                                         Instruction::BinaryOps BinOp) {
7966   // When unrolling and the VF is 1, we only need to add a simple scalar.
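  // For example, with StartIdx == 2 and Step == 1 this simply emits 'Val + 2'
  // (or the analogous floating-point sequence).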
7967   Type *Ty = Val->getType();
7968   assert(!Ty->isVectorTy() && "Val must be a scalar");
7969 
7970   if (Ty->isFloatingPointTy()) {
7971     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7972 
7973     // Floating-point operations inherit FMF via the builder's flags.
7974     Value *MulOp = Builder.CreateFMul(C, Step);
7975     return Builder.CreateBinOp(BinOp, Val, MulOp);
7976   }
7977   Constant *C = ConstantInt::get(Ty, StartIdx);
7978   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7979 }
7980 
7981 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7982   SmallVector<Metadata *, 4> MDs;
7983   // Reserve first location for self reference to the LoopID metadata node.
7984   MDs.push_back(nullptr);
7985   bool IsUnrollMetadata = false;
7986   MDNode *LoopID = L->getLoopID();
7987   if (LoopID) {
7988     // First find existing loop unrolling disable metadata.
7989     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7990       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7991       if (MD) {
7992         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7993         IsUnrollMetadata =
7994             S && S->getString().startswith("llvm.loop.unroll.disable");
7995       }
7996       MDs.push_back(LoopID->getOperand(i));
7997     }
7998   }
7999 
8000   if (!IsUnrollMetadata) {
8001     // Add runtime unroll disable metadata.
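    // The resulting loop ID looks roughly like (illustrative):
    //   !0 = distinct !{!0, ..., !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}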
8002     LLVMContext &Context = L->getHeader()->getContext();
8003     SmallVector<Metadata *, 1> DisableOperands;
8004     DisableOperands.push_back(
8005         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8006     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8007     MDs.push_back(DisableNode);
8008     MDNode *NewLoopID = MDNode::get(Context, MDs);
8009     // Set operand 0 to refer to the loop id itself.
8010     NewLoopID->replaceOperandWith(0, NewLoopID);
8011     L->setLoopID(NewLoopID);
8012   }
8013 }
8014 
8015 //===--------------------------------------------------------------------===//
8016 // EpilogueVectorizerMainLoop
8017 //===--------------------------------------------------------------------===//
8018 
8019 /// This function is partially responsible for generating the control flow
8020 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8021 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8022   MDNode *OrigLoopID = OrigLoop->getLoopID();
8023   Loop *Lp = createVectorLoopSkeleton("");
8024 
8025   // Generate the code to check the minimum iteration count of the vector
8026   // epilogue (see below).
8027   EPI.EpilogueIterationCountCheck =
8028       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8029   EPI.EpilogueIterationCountCheck->setName("iter.check");
8030 
8031   // Generate the code to check any assumptions that we've made for SCEV
8032   // expressions.
8033   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8034 
8035   // Generate the code that checks at runtime if arrays overlap. We put the
8036   // checks into a separate block to make the more common case of few elements
8037   // faster.
8038   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8039 
8040   // Generate the iteration count check for the main loop, *after* the check
8041   // for the epilogue loop, so that the path-length is shorter for the case
8042   // that goes directly through the vector epilogue. The longer-path length for
8043   // the main loop is compensated for, by the gain from vectorizing the larger
8044   // trip count. Note: the branch will get updated later on when we vectorize
8045   // the epilogue.
8046   EPI.MainLoopIterationCountCheck =
8047       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8048 
8049   // Generate the induction variable.
8050   OldInduction = Legal->getPrimaryInduction();
8051   Type *IdxTy = Legal->getWidestInductionType();
8052   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8053   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8054   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8055   EPI.VectorTripCount = CountRoundDown;
8056   Induction =
8057       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8058                               getDebugLocFromInstOrOperands(OldInduction));
8059 
8060   // Skip induction resume value creation here because they will be created in
8061   // the second pass. If we created them here, they wouldn't be used anyway,
8062   // because the vplan in the second pass still contains the inductions from the
8063   // original loop.
8064 
8065   return completeLoopSkeleton(Lp, OrigLoopID);
8066 }
8067 
8068 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8069   LLVM_DEBUG({
8070     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8071            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8072            << ", Main Loop UF:" << EPI.MainLoopUF
8073            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8074            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8075   });
8076 }
8077 
8078 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8079   DEBUG_WITH_TYPE(VerboseDebug, {
8080     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8081   });
8082 }
8083 
8084 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8085     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8086   assert(L && "Expected valid Loop.");
8087   assert(Bypass && "Expected valid bypass basic block.");
8088   unsigned VFactor =
8089       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8090   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8091   Value *Count = getOrCreateTripCount(L);
8092   // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
8094   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8095   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8096 
8097   // Generate code to check if the loop's trip count is less than VF * UF of the
8098   // main vector loop.
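  // For example, with VF=8 and UF=2 this emits roughly
  //   %min.iters.check = icmp ult i64 %count, 16
  // (or 'ule' when a scalar epilogue is required), and the branch created
  // below bypasses the vector loop when the check succeeds (illustrative).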
8099   auto P =
8100       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8101 
8102   Value *CheckMinIters = Builder.CreateICmp(
8103       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8104       "min.iters.check");
8105 
8106   if (!ForEpilogue)
8107     TCCheckBlock->setName("vector.main.loop.iter.check");
8108 
8109   // Create new preheader for vector loop.
8110   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8111                                    DT, LI, nullptr, "vector.ph");
8112 
8113   if (ForEpilogue) {
8114     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8115                                  DT->getNode(Bypass)->getIDom()) &&
8116            "TC check is expected to dominate Bypass");
8117 
8118     // Update dominator for Bypass & LoopExit.
8119     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8120     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8121 
8122     LoopBypassBlocks.push_back(TCCheckBlock);
8123 
8124     // Save the trip count so we don't have to regenerate it in the
8125     // vec.epilog.iter.check. This is safe to do because the trip count
8126     // generated here dominates the vector epilog iter check.
8127     EPI.TripCount = Count;
8128   }
8129 
8130   ReplaceInstWithInst(
8131       TCCheckBlock->getTerminator(),
8132       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8133 
8134   return TCCheckBlock;
8135 }
8136 
8137 //===--------------------------------------------------------------------===//
8138 // EpilogueVectorizerEpilogueLoop
8139 //===--------------------------------------------------------------------===//
8140 
8141 /// This function is partially responsible for generating the control flow
8142 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8143 BasicBlock *
8144 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8145   MDNode *OrigLoopID = OrigLoop->getLoopID();
8146   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8147 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8150   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8151   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8152   LoopVectorPreHeader =
8153       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8154                  LI, nullptr, "vec.epilog.ph");
8155   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8156                                           VecEpilogueIterationCountCheck);
8157 
8158   // Adjust the control flow taking the state info from the main loop
8159   // vectorization into account.
8160   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8161          "expected this to be saved from the previous pass.");
8162   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8163       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8164 
8165   DT->changeImmediateDominator(LoopVectorPreHeader,
8166                                EPI.MainLoopIterationCountCheck);
8167 
8168   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8169       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8170 
8171   if (EPI.SCEVSafetyCheck)
8172     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8173         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8174   if (EPI.MemSafetyCheck)
8175     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8176         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8177 
8178   DT->changeImmediateDominator(
8179       VecEpilogueIterationCountCheck,
8180       VecEpilogueIterationCountCheck->getSinglePredecessor());
8181 
8182   DT->changeImmediateDominator(LoopScalarPreHeader,
8183                                EPI.EpilogueIterationCountCheck);
8184   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8185 
8186   // Keep track of bypass blocks, as they feed start values to the induction
8187   // phis in the scalar loop preheader.
8188   if (EPI.SCEVSafetyCheck)
8189     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8190   if (EPI.MemSafetyCheck)
8191     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8192   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8193 
8194   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8196   Type *IdxTy = Legal->getWidestInductionType();
8197   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8198                                          LoopVectorPreHeader->getFirstNonPHI());
8199   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8200   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8201                            EPI.MainLoopIterationCountCheck);
8202 
8203   // Generate the induction variable.
8204   OldInduction = Legal->getPrimaryInduction();
8205   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8206   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8207   Value *StartIdx = EPResumeVal;
8208   Induction =
8209       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8210                               getDebugLocFromInstOrOperands(OldInduction));
8211 
8212   // Generate induction resume values. These variables save the new starting
8213   // indexes for the scalar loop. They are used to test if there are any tail
8214   // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
8219   createInductionResumeValues(Lp, CountRoundDown,
8220                               {VecEpilogueIterationCountCheck,
8221                                EPI.VectorTripCount} /* AdditionalBypass */);
8222 
8223   AddRuntimeUnrollDisableMetaData(Lp);
8224   return completeLoopSkeleton(Lp, OrigLoopID);
8225 }
8226 
8227 BasicBlock *
8228 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8229     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8230 
8231   assert(EPI.TripCount &&
8232          "Expected trip count to have been safed in the first pass.");
8233   assert(
8234       (!isa<Instruction>(EPI.TripCount) ||
8235        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8236       "saved trip count does not dominate insertion point.");
8237   Value *TC = EPI.TripCount;
8238   IRBuilder<> Builder(Insert->getTerminator());
8239   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8240 
8241   // Generate code to check if the loop's trip count is less than VF * UF of the
8242   // vector epilogue loop.
8243   auto P =
8244       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8245 
8246   Value *CheckMinIters = Builder.CreateICmp(
8247       P, Count,
8248       ConstantInt::get(Count->getType(),
8249                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8250       "min.epilog.iters.check");
8251 
8252   ReplaceInstWithInst(
8253       Insert->getTerminator(),
8254       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8255 
8256   LoopBypassBlocks.push_back(Insert);
8257   return Insert;
8258 }
8259 
8260 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8261   LLVM_DEBUG({
8262     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8263            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8264            << ", Main Loop UF:" << EPI.MainLoopUF
8265            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8266            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8267   });
8268 }
8269 
8270 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8271   DEBUG_WITH_TYPE(VerboseDebug, {
8272     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8273   });
8274 }
8275 
8276 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8277     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8278   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8279   bool PredicateAtRangeStart = Predicate(Range.Start);
8280 
8281   for (ElementCount TmpVF = Range.Start * 2;
8282        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8283     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8284       Range.End = TmpVF;
8285       break;
8286     }
8287 
8288   return PredicateAtRangeStart;
8289 }
8290 
8291 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8292 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8293 /// of VF's starting at a given VF and extending it as much as possible. Each
8294 /// vectorization decision can potentially shorten this sub-range during
8295 /// buildVPlan().
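/// For example, with \p MinVF = 2 and \p MaxVF = 8 this might build one VPlan
/// covering {2, 4} and another covering {8}, depending on where the widening
/// decisions differ (illustrative).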
8296 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8297                                            ElementCount MaxVF) {
8298   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8299   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8300     VFRange SubRange = {VF, MaxVFPlusOne};
8301     VPlans.push_back(buildVPlan(SubRange));
8302     VF = SubRange.End;
8303   }
8304 }
8305 
8306 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8307                                          VPlanPtr &Plan) {
8308   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8309 
8310   // Look for cached value.
8311   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8312   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8313   if (ECEntryIt != EdgeMaskCache.end())
8314     return ECEntryIt->second;
8315 
8316   VPValue *SrcMask = createBlockInMask(Src, Plan);
8317 
8318   // The terminator has to be a branch inst!
8319   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8320   assert(BI && "Unexpected terminator found");
8321 
8322   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8323     return EdgeMaskCache[Edge] = SrcMask;
8324 
8325   // If source is an exiting block, we know the exit edge is dynamically dead
8326   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8327   // adding uses of an otherwise potentially dead instruction.
8328   if (OrigLoop->isLoopExiting(Src))
8329     return EdgeMaskCache[Edge] = SrcMask;
8330 
8331   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8332   assert(EdgeMask && "No Edge Mask found for condition");
8333 
8334   if (BI->getSuccessor(0) != Dst)
8335     EdgeMask = Builder.createNot(EdgeMask);
8336 
8337   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8338     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8339     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8340     // The select version does not introduce new UB if SrcMask is false and
8341     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8342     VPValue *False = Plan->getOrAddVPValue(
8343         ConstantInt::getFalse(BI->getCondition()->getType()));
8344     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8345   }
8346 
8347   return EdgeMaskCache[Edge] = EdgeMask;
8348 }
8349 
8350 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8351   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8352 
8353   // Look for cached value.
8354   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8355   if (BCEntryIt != BlockMaskCache.end())
8356     return BCEntryIt->second;
8357 
8358   // All-one mask is modelled as no-mask following the convention for masked
8359   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8360   VPValue *BlockMask = nullptr;
8361 
8362   if (OrigLoop->getHeader() == BB) {
8363     if (!CM.blockNeedsPredication(BB))
8364       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8365 
8366     // Create the block in mask as the first non-phi instruction in the block.
8367     VPBuilder::InsertPointGuard Guard(Builder);
8368     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8369     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8370 
8371     // Introduce the early-exit compare IV <= BTC to form header block mask.
8372     // This is used instead of IV < TC because TC may wrap, unlike BTC.
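    // For example, with an i8 IV a trip count of 256 wraps to 0, whereas the
    // backedge-taken count of 255 does not (illustrative).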
8373     // Start by constructing the desired canonical IV.
8374     VPValue *IV = nullptr;
8375     if (Legal->getPrimaryInduction())
8376       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8377     else {
8378       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8379       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8380       IV = IVRecipe->getVPValue();
8381     }
8382     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8383     bool TailFolded = !CM.isScalarEpilogueAllowed();
8384 
8385     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8386       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8387       // as a second argument, we only pass the IV here and extract the
8388       // tripcount from the transform state where codegen of the VP instructions
      // happens.
8390       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8391     } else {
8392       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8393     }
8394     return BlockMaskCache[BB] = BlockMask;
8395   }
8396 
8397   // This is the block mask. We OR all incoming edges.
8398   for (auto *Predecessor : predecessors(BB)) {
8399     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8400     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8401       return BlockMaskCache[BB] = EdgeMask;
8402 
8403     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8404       BlockMask = EdgeMask;
8405       continue;
8406     }
8407 
8408     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8409   }
8410 
8411   return BlockMaskCache[BB] = BlockMask;
8412 }
8413 
8414 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8415                                                 ArrayRef<VPValue *> Operands,
8416                                                 VFRange &Range,
8417                                                 VPlanPtr &Plan) {
8418   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8419          "Must be called with either a load or store");
8420 
8421   auto willWiden = [&](ElementCount VF) -> bool {
8422     if (VF.isScalar())
8423       return false;
8424     LoopVectorizationCostModel::InstWidening Decision =
8425         CM.getWideningDecision(I, VF);
8426     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8427            "CM decision should be taken at this point.");
8428     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8429       return true;
8430     if (CM.isScalarAfterVectorization(I, VF) ||
8431         CM.isProfitableToScalarize(I, VF))
8432       return false;
8433     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8434   };
8435 
8436   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8437     return nullptr;
8438 
8439   VPValue *Mask = nullptr;
8440   if (Legal->isMaskRequired(I))
8441     Mask = createBlockInMask(I->getParent(), Plan);
8442 
8443   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8444     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8445 
8446   StoreInst *Store = cast<StoreInst>(I);
8447   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8448                                             Mask);
8449 }
8450 
8451 VPWidenIntOrFpInductionRecipe *
8452 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8453                                            ArrayRef<VPValue *> Operands) const {
8454   // Check if this is an integer or fp induction. If so, build the recipe that
8455   // produces its scalar and vector values.
8456   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8457   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8458       II.getKind() == InductionDescriptor::IK_FpInduction) {
8459     assert(II.getStartValue() ==
8460            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8461     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8462     return new VPWidenIntOrFpInductionRecipe(
8463         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8464   }
8465 
8466   return nullptr;
8467 }
8468 
8469 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8470     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8471     VPlan &Plan) const {
8472   // Optimize the special case where the source is a constant integer
8473   // induction variable. Notice that we can only optimize the 'trunc' case
8474   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8475   // (c) other casts depend on pointer size.
8476 
8477   // Determine whether \p K is a truncation based on an induction variable that
8478   // can be optimized.
8479   auto isOptimizableIVTruncate =
8480       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8481     return [=](ElementCount VF) -> bool {
8482       return CM.isOptimizableIVTruncate(K, VF);
8483     };
8484   };
8485 
8486   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8487           isOptimizableIVTruncate(I), Range)) {
8488 
8489     InductionDescriptor II =
8490         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8491     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8492     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8493                                              Start, nullptr, I);
8494   }
8495   return nullptr;
8496 }
8497 
8498 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8499                                                 ArrayRef<VPValue *> Operands,
8500                                                 VPlanPtr &Plan) {
8501   // If all incoming values are equal, the incoming VPValue can be used directly
8502   // instead of creating a new VPBlendRecipe.
8503   VPValue *FirstIncoming = Operands[0];
8504   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8505         return FirstIncoming == Inc;
8506       })) {
8507     return Operands[0];
8508   }
8509 
8510   // We know that all PHIs in non-header blocks are converted into selects, so
8511   // we don't have to worry about the insertion order and we can just use the
8512   // builder. At this point we generate the predication tree. There may be
8513   // duplications since this is a simple recursive scan, but future
8514   // optimizations will clean it up.
8515   SmallVector<VPValue *, 2> OperandsWithMask;
8516   unsigned NumIncoming = Phi->getNumIncomingValues();
8517 
8518   for (unsigned In = 0; In < NumIncoming; In++) {
8519     VPValue *EdgeMask =
8520       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8521     assert((EdgeMask || NumIncoming == 1) &&
8522            "Multiple predecessors with one having a full mask");
8523     OperandsWithMask.push_back(Operands[In]);
8524     if (EdgeMask)
8525       OperandsWithMask.push_back(EdgeMask);
8526   }
8527   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8528 }
8529 
8530 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8531                                                    ArrayRef<VPValue *> Operands,
8532                                                    VFRange &Range) const {
8533 
8534   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8535       [this, CI](ElementCount VF) {
8536         return CM.isScalarWithPredication(CI, VF);
8537       },
8538       Range);
8539 
8540   if (IsPredicated)
8541     return nullptr;
8542 
8543   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8544   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8545              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8546              ID == Intrinsic::pseudoprobe ||
8547              ID == Intrinsic::experimental_noalias_scope_decl))
8548     return nullptr;
8549 
8550   auto willWiden = [&](ElementCount VF) -> bool {
8551     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8552     // The following case may be scalarized depending on the VF.
8553     // The flag shows whether we use an intrinsic or a usual call for the
8554     // vectorized version of the instruction, i.e. whether it is beneficial
8555     // to perform the intrinsic call rather than the library call.
8556     bool NeedToScalarize = false;
8557     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8558     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8559     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8560     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8561            "Either the intrinsic cost or vector call cost must be valid");
8562     return UseVectorIntrinsic || !NeedToScalarize;
8563   };
8564 
8565   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8566     return nullptr;
8567 
8568   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8569   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8570 }
8571 
8572 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8573   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8574          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8575   // Instruction should be widened, unless it is scalar after vectorization,
8576   // scalarization is profitable, or it is predicated.
8577   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8578     return CM.isScalarAfterVectorization(I, VF) ||
8579            CM.isProfitableToScalarize(I, VF) ||
8580            CM.isScalarWithPredication(I, VF);
8581   };
8582   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8583                                                              Range);
8584 }
8585 
8586 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8587                                            ArrayRef<VPValue *> Operands) const {
8588   auto IsVectorizableOpcode = [](unsigned Opcode) {
8589     switch (Opcode) {
8590     case Instruction::Add:
8591     case Instruction::And:
8592     case Instruction::AShr:
8593     case Instruction::BitCast:
8594     case Instruction::FAdd:
8595     case Instruction::FCmp:
8596     case Instruction::FDiv:
8597     case Instruction::FMul:
8598     case Instruction::FNeg:
8599     case Instruction::FPExt:
8600     case Instruction::FPToSI:
8601     case Instruction::FPToUI:
8602     case Instruction::FPTrunc:
8603     case Instruction::FRem:
8604     case Instruction::FSub:
8605     case Instruction::ICmp:
8606     case Instruction::IntToPtr:
8607     case Instruction::LShr:
8608     case Instruction::Mul:
8609     case Instruction::Or:
8610     case Instruction::PtrToInt:
8611     case Instruction::SDiv:
8612     case Instruction::Select:
8613     case Instruction::SExt:
8614     case Instruction::Shl:
8615     case Instruction::SIToFP:
8616     case Instruction::SRem:
8617     case Instruction::Sub:
8618     case Instruction::Trunc:
8619     case Instruction::UDiv:
8620     case Instruction::UIToFP:
8621     case Instruction::URem:
8622     case Instruction::Xor:
8623     case Instruction::ZExt:
8624       return true;
8625     }
8626     return false;
8627   };
8628 
8629   if (!IsVectorizableOpcode(I->getOpcode()))
8630     return nullptr;
8631 
8632   // Success: widen this instruction.
8633   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8634 }
8635 
8636 VPBasicBlock *VPRecipeBuilder::handleReplication(
8637     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8638     VPlanPtr &Plan) {
8639   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8640       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8641       Range);
8642 
8643   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8644       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
8645       Range);
8646 
8647   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8648                                        IsUniform, IsPredicated);
8649   setRecipe(I, Recipe);
8650   Plan->addVPValue(I, Recipe);
8651 
8652   // Find if I uses a predicated instruction. If so, it will use its scalar
8653   // value. Avoid hoisting the insert-element which packs the scalar value into
8654   // a vector value, as that happens iff all users use the vector value.
8655   for (VPValue *Op : Recipe->operands()) {
8656     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8657     if (!PredR)
8658       continue;
8659     auto *RepR =
8660         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8661     assert(RepR->isPredicated() &&
8662            "expected Replicate recipe to be predicated");
8663     RepR->setAlsoPack(false);
8664   }
8665 
8666   // Finalize the recipe for Instr, first if it is not predicated.
8667   if (!IsPredicated) {
8668     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8669     VPBB->appendRecipe(Recipe);
8670     return VPBB;
8671   }
8672   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8673   assert(VPBB->getSuccessors().empty() &&
8674          "VPBB has successors when handling predicated replication.");
8675   // Record predicated instructions for above packing optimizations.
8676   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8677   VPBlockUtils::insertBlockAfter(Region, VPBB);
8678   auto *RegSucc = new VPBasicBlock();
8679   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8680   return RegSucc;
8681 }
8682 
8683 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8684                                                       VPRecipeBase *PredRecipe,
8685                                                       VPlanPtr &Plan) {
8686   // Instructions marked for predication are replicated and placed under an
8687   // if-then construct to prevent side-effects.
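  //
  // A sketch of the region built below (block names are illustrative; they are
  // derived from the instruction's opcode):
  //
  //   pred.<opcode>.entry      -- VPBranchOnMaskRecipe on BlockInMask
  //      |        \
  //      |    pred.<opcode>.if -- PredRecipe, the replicated instruction
  //      |        /
  //   pred.<opcode>.continue   -- VPPredInstPHIRecipe, if Instr produces a value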
8688 
8689   // Generate recipes to compute the block mask for this region.
8690   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8691 
8692   // Build the triangular if-then region.
8693   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8694   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8695   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8696   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8697   auto *PHIRecipe = Instr->getType()->isVoidTy()
8698                         ? nullptr
8699                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8700   if (PHIRecipe) {
8701     Plan->removeVPValueFor(Instr);
8702     Plan->addVPValue(Instr, PHIRecipe);
8703   }
8704   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8705   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8706   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8707 
8708   // Note: first set Entry as region entry and then connect successors starting
8709   // from it in order, to propagate the "parent" of each VPBasicBlock.
8710   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8711   VPBlockUtils::connectBlocks(Pred, Exit);
8712 
8713   return Region;
8714 }
8715 
8716 VPRecipeOrVPValueTy
8717 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8718                                         ArrayRef<VPValue *> Operands,
8719                                         VFRange &Range, VPlanPtr &Plan) {
8720   // First, check for specific widening recipes that deal with calls, memory
8721   // operations, inductions and Phi nodes.
8722   if (auto *CI = dyn_cast<CallInst>(Instr))
8723     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8724 
8725   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8726     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8727 
8728   VPRecipeBase *Recipe;
8729   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8730     if (Phi->getParent() != OrigLoop->getHeader())
8731       return tryToBlend(Phi, Operands, Plan);
8732     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8733       return toVPRecipeResult(Recipe);
8734 
8735     if (Legal->isReductionVariable(Phi)) {
8736       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8737       assert(RdxDesc.getRecurrenceStartValue() ==
8738              Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8739       VPValue *StartV = Operands[0];
8740       return toVPRecipeResult(new VPWidenPHIRecipe(Phi, RdxDesc, *StartV));
8741     }
8742 
8743     return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8744   }
8745 
8746   if (isa<TruncInst>(Instr) &&
8747       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8748                                                Range, *Plan)))
8749     return toVPRecipeResult(Recipe);
8750 
8751   if (!shouldWiden(Instr, Range))
8752     return nullptr;
8753 
8754   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8755     return toVPRecipeResult(new VPWidenGEPRecipe(
8756         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8757 
8758   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8759     bool InvariantCond =
8760         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8761     return toVPRecipeResult(new VPWidenSelectRecipe(
8762         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8763   }
8764 
8765   return toVPRecipeResult(tryToWiden(Instr, Operands));
8766 }
8767 
8768 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8769                                                         ElementCount MaxVF) {
8770   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8771 
8772   // Collect instructions from the original loop that will become trivially dead
8773   // in the vectorized loop. We don't need to vectorize these instructions. For
8774   // example, original induction update instructions can become dead because we
8775   // separately emit induction "steps" when generating code for the new loop.
8776   // Similarly, we create a new latch condition when setting up the structure
8777   // of the new loop, so the old one can become dead.
8778   SmallPtrSet<Instruction *, 4> DeadInstructions;
8779   collectTriviallyDeadInstructions(DeadInstructions);
8780 
8781   // Add assume instructions we need to drop to DeadInstructions, to prevent
8782   // them from being added to the VPlan.
8783   // TODO: We only need to drop assumes in blocks that get flattened. If the
8784   // control flow is preserved, we should keep them.
8785   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8786   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8787 
8788   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8789   // Dead instructions do not need sinking. Remove them from SinkAfter.
8790   for (Instruction *I : DeadInstructions)
8791     SinkAfter.erase(I);
8792 
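  // Build one VPlan per sub-range of VFs. Each call below may clamp
  // SubRange.End based on the per-VF decisions taken while building the plan,
  // so e.g. (hypothetically) MinVF=2 and MaxVF=16 may yield one plan covering
  // VF={2,4} and another covering VF={8,16}.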
8793   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8794   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8795     VFRange SubRange = {VF, MaxVFPlusOne};
8796     VPlans.push_back(
8797         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8798     VF = SubRange.End;
8799   }
8800 }
8801 
8802 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8803     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8804     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8805 
8806   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8807 
8808   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8809 
8810   // ---------------------------------------------------------------------------
8811   // Pre-construction: record ingredients whose recipes we'll need to further
8812   // process after constructing the initial VPlan.
8813   // ---------------------------------------------------------------------------
8814 
8815   // Mark instructions we'll need to sink later and their targets as
8816   // ingredients whose recipe we'll need to record.
8817   for (auto &Entry : SinkAfter) {
8818     RecipeBuilder.recordRecipeOf(Entry.first);
8819     RecipeBuilder.recordRecipeOf(Entry.second);
8820   }
8821   for (auto &Reduction : CM.getInLoopReductionChains()) {
8822     PHINode *Phi = Reduction.first;
8823     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8824     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8825 
8826     RecipeBuilder.recordRecipeOf(Phi);
8827     for (auto &R : ReductionOperations) {
8828       RecipeBuilder.recordRecipeOf(R);
8829       // For min/max reductions, where we have a pair of icmp/select, we also
8830       // need to record the ICmp recipe, so it can be removed later.
8831       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8832         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8833     }
8834   }
8835 
8836   // For each interleave group which is relevant for this (possibly trimmed)
8837   // Range, add it to the set of groups to be later applied to the VPlan and add
8838   // placeholders for its members' Recipes which we'll be replacing with a
8839   // single VPInterleaveRecipe.
8840   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8841     auto applyIG = [IG, this](ElementCount VF) -> bool {
8842       return (VF.isVector() && // Query is illegal for VF == 1
8843               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8844                   LoopVectorizationCostModel::CM_Interleave);
8845     };
8846     if (!getDecisionAndClampRange(applyIG, Range))
8847       continue;
8848     InterleaveGroups.insert(IG);
8849     for (unsigned i = 0; i < IG->getFactor(); i++)
8850       if (Instruction *Member = IG->getMember(i))
8851         RecipeBuilder.recordRecipeOf(Member);
8852   }
8853 
8854   // ---------------------------------------------------------------------------
8855   // Build initial VPlan: Scan the body of the loop in a topological order to
8856   // visit each basic block after having visited its predecessor basic blocks.
8857   // ---------------------------------------------------------------------------
8858 
8859   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8860   auto Plan = std::make_unique<VPlan>();
8861   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8862   Plan->setEntry(VPBB);
8863 
8864   // Scan the body of the loop in a topological order to visit each basic block
8865   // after having visited its predecessor basic blocks.
8866   LoopBlocksDFS DFS(OrigLoop);
8867   DFS.perform(LI);
8868 
8869   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8870     // Relevant instructions from basic block BB will be grouped into VPRecipe
8871     // ingredients and fill a new VPBasicBlock.
8872     unsigned VPBBsForBB = 0;
8873     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8874     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8875     VPBB = FirstVPBBForBB;
8876     Builder.setInsertPoint(VPBB);
8877 
8878     // Introduce each ingredient into VPlan.
8879     // TODO: Model and preserve debug intrinsics in VPlan.
8880     for (Instruction &I : BB->instructionsWithoutDebug()) {
8881       Instruction *Instr = &I;
8882 
8883       // First filter out irrelevant instructions, to ensure no recipes are
8884       // built for them.
8885       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8886         continue;
8887 
8888       SmallVector<VPValue *, 4> Operands;
8889       auto *Phi = dyn_cast<PHINode>(Instr);
8890       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8891         Operands.push_back(Plan->getOrAddVPValue(
8892             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8893       } else {
8894         auto OpRange = Plan->mapToVPValues(Instr->operands());
8895         Operands = {OpRange.begin(), OpRange.end()};
8896       }
8897       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8898               Instr, Operands, Range, Plan)) {
8899         // If Instr can be simplified to an existing VPValue, use it.
8900         if (RecipeOrValue.is<VPValue *>()) {
8901           Plan->addVPValue(Instr, RecipeOrValue.get<VPValue *>());
8902           continue;
8903         }
8904         // Otherwise, add the new recipe.
8905         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8906         for (auto *Def : Recipe->definedValues()) {
8907           auto *UV = Def->getUnderlyingValue();
8908           Plan->addVPValue(UV, Def);
8909         }
8910 
8911         RecipeBuilder.setRecipe(Instr, Recipe);
8912         VPBB->appendRecipe(Recipe);
8913         continue;
8914       }
8915 
8916       // Otherwise, if all widening options failed, the instruction is to be
8917       // replicated. This may create a successor for VPBB.
8918       VPBasicBlock *NextVPBB =
8919           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8920       if (NextVPBB != VPBB) {
8921         VPBB = NextVPBB;
8922         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8923                                     : "");
8924       }
8925     }
8926   }
8927 
8928   // Discard the empty dummy pre-entry VPBasicBlock. Note that other
8929   // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
8930   // original basic blocks with no recipes.
8931   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8932   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8933   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8934   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8935   delete PreEntry;
8936 
8937   // ---------------------------------------------------------------------------
8938   // Transform initial VPlan: Apply previously taken decisions, in order, to
8939   // bring the VPlan to its final state.
8940   // ---------------------------------------------------------------------------
8941 
8942   // Apply Sink-After legal constraints.
8943   for (auto &Entry : SinkAfter) {
8944     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8945     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8946     // If the target is in a replication region, make sure to move Sink to the
8947     // block after it, not into the replication region itself.
8948     if (auto *Region =
8949             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
8950       if (Region->isReplicator()) {
8951         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
8952         VPBasicBlock *NextBlock =
8953             cast<VPBasicBlock>(Region->getSuccessors().front());
8954         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8955         continue;
8956       }
8957     }
8958     Sink->moveAfter(Target);
8959   }
8960 
8961   // Interleave memory: for each Interleave Group we marked earlier as relevant
8962   // for this VPlan, replace the Recipes widening its memory instructions with a
8963   // single VPInterleaveRecipe at its insertion point.
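  // For example (illustrative IR), an interleave group of factor 2 built from
  //   %x = load i32, i32* %gep0   ; A[2*i]
  //   %y = load i32, i32* %gep1   ; A[2*i+1]
  // is replaced by a single VPInterleaveRecipe at the group's insert position;
  // the recipe's defined values take over all uses of the members' old recipes,
  // which are then erased.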
8964   for (auto IG : InterleaveGroups) {
8965     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8966         RecipeBuilder.getRecipe(IG->getInsertPos()));
8967     SmallVector<VPValue *, 4> StoredValues;
8968     for (unsigned i = 0; i < IG->getFactor(); ++i)
8969       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
8970         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
8971 
8972     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8973                                         Recipe->getMask());
8974     VPIG->insertBefore(Recipe);
8975     unsigned J = 0;
8976     for (unsigned i = 0; i < IG->getFactor(); ++i)
8977       if (Instruction *Member = IG->getMember(i)) {
8978         if (!Member->getType()->isVoidTy()) {
8979           VPValue *OriginalV = Plan->getVPValue(Member);
8980           Plan->removeVPValueFor(Member);
8981           Plan->addVPValue(Member, VPIG->getVPValue(J));
8982           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8983           J++;
8984         }
8985         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8986       }
8987   }
8988 
8989   // Adjust the recipes for any inloop reductions.
8990   if (Range.Start.isVector())
8991     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
8992 
8993   // Finally, if tail is folded by masking, introduce selects between the phi
8994   // and the live-out instruction of each reduction, at the end of the latch.
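  // The select keeps the value of the reduction phi for the lanes masked out by
  // tail folding, roughly:
  //   %s = select <VF x i1> %header.mask, <VF x Ty> %red.liveout,
  //               <VF x Ty> %red.phi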
8995   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
8996     Builder.setInsertPoint(VPBB);
8997     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
8998     for (auto &Reduction : Legal->getReductionVars()) {
8999       if (CM.isInLoopReduction(Reduction.first))
9000         continue;
9001       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9002       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9003       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9004     }
9005   }
9006 
9007   std::string PlanName;
9008   raw_string_ostream RSO(PlanName);
9009   ElementCount VF = Range.Start;
9010   Plan->addVF(VF);
9011   RSO << "Initial VPlan for VF={" << VF;
9012   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9013     Plan->addVF(VF);
9014     RSO << "," << VF;
9015   }
9016   RSO << "},UF>=1";
9017   RSO.flush();
9018   Plan->setName(PlanName);
9019 
9020   return Plan;
9021 }
9022 
9023 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9024   // Outer loop handling: outer loops may require CFG and instruction level
9025   // transformations before even evaluating whether vectorization is profitable.
9026   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9027   // the vectorization pipeline.
9028   assert(!OrigLoop->isInnermost());
9029   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9030 
9031   // Create new empty VPlan
9032   auto Plan = std::make_unique<VPlan>();
9033 
9034   // Build hierarchical CFG
9035   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9036   HCFGBuilder.buildHierarchicalCFG();
9037 
9038   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9039        VF *= 2)
9040     Plan->addVF(VF);
9041 
9042   if (EnableVPlanPredication) {
9043     VPlanPredicator VPP(*Plan);
9044     VPP.predicate();
9045 
9046     // Avoid running transformation to recipes until masked code generation in
9047     // VPlan-native path is in place.
9048     return Plan;
9049   }
9050 
9051   SmallPtrSet<Instruction *, 1> DeadInstructions;
9052   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9053                                              Legal->getInductionVars(),
9054                                              DeadInstructions, *PSE.getSE());
9055   return Plan;
9056 }
9057 
9058 // Adjust the recipes for any inloop reductions. The chain of instructions
9059 // leading from the loop exit instr to the phi need to be converted to
9060 // reductions, with one operand being vector and the other being the scalar
9061 // reduction chain.
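//
// For example (illustrative IR), for an in-loop integer add reduction
//   %phi  = phi i32 [ 0, %ph ], [ %add2, %latch ]
//   %add1 = add i32 %phi, %a
//   %add2 = add i32 %add1, %b
// ReductionOperations is {%add1, %add2}; each add is replaced by a
// VPReductionRecipe whose chain operand is the previous item in the chain
// (%phi, then %add1) and whose vector operand is the other add operand
// (%a, then %b).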
9062 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9063     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
9064   for (auto &Reduction : CM.getInLoopReductionChains()) {
9065     PHINode *Phi = Reduction.first;
9066     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9067     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9068 
9069     // ReductionOperations are ordered top-down from the phi's use to the
9070     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9071     // which of the two operands will remain scalar and which will be reduced.
9072     // For min/max the chain will be the select instructions.
9073     Instruction *Chain = Phi;
9074     for (Instruction *R : ReductionOperations) {
9075       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9076       RecurKind Kind = RdxDesc.getRecurrenceKind();
9077 
9078       VPValue *ChainOp = Plan->getVPValue(Chain);
9079       unsigned FirstOpId;
9080       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9081         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9082                "Expected to replace a VPWidenSelectSC");
9083         FirstOpId = 1;
9084       } else {
9085         assert(isa<VPWidenRecipe>(WidenRecipe) &&
9086                "Expected to replace a VPWidenSC");
9087         FirstOpId = 0;
9088       }
9089       unsigned VecOpId =
9090           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9091       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9092 
9093       auto *CondOp = CM.foldTailByMasking()
9094                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9095                          : nullptr;
9096       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9097           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9098       WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
9099       Plan->removeVPValueFor(R);
9100       Plan->addVPValue(R, RedRecipe);
9101       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9103       WidenRecipe->eraseFromParent();
9104 
9105       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9106         VPRecipeBase *CompareRecipe =
9107             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9108         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9109                "Expected to replace a VPWidenSC");
9110         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9111                "Expected no remaining users");
9112         CompareRecipe->eraseFromParent();
9113       }
9114       Chain = R;
9115     }
9116   }
9117 }
9118 
9119 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9120 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9121                                VPSlotTracker &SlotTracker) const {
9122   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9123   IG->getInsertPos()->printAsOperand(O, false);
9124   O << ", ";
9125   getAddr()->printAsOperand(O, SlotTracker);
9126   VPValue *Mask = getMask();
9127   if (Mask) {
9128     O << ", ";
9129     Mask->printAsOperand(O, SlotTracker);
9130   }
9131   for (unsigned i = 0; i < IG->getFactor(); ++i)
9132     if (Instruction *I = IG->getMember(i))
9133       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9134 }
9135 #endif
9136 
9137 void VPWidenCallRecipe::execute(VPTransformState &State) {
9138   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9139                                   *this, State);
9140 }
9141 
9142 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9143   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9144                                     this, *this, InvariantCond, State);
9145 }
9146 
9147 void VPWidenRecipe::execute(VPTransformState &State) {
9148   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9149 }
9150 
9151 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9152   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9153                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9154                       IsIndexLoopInvariant, State);
9155 }
9156 
9157 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9158   assert(!State.Instance && "Int or FP induction being replicated.");
9159   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9160                                    getTruncInst(), getVPValue(0),
9161                                    getCastValue(), State);
9162 }
9163 
9164 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9165   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9166                                  getStartValue(), this, State);
9167 }
9168 
9169 void VPBlendRecipe::execute(VPTransformState &State) {
9170   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9171   // We know that all PHIs in non-header blocks are converted into
9172   // selects, so we don't have to worry about the insertion order and we
9173   // can just use the builder.
9174   // At this point we generate the predication tree. There may be
9175   // duplications since this is a simple recursive scan, but future
9176   // optimizations will clean it up.
9177 
9178   unsigned NumIncoming = getNumIncomingValues();
9179 
9180   // Generate a sequence of selects of the form:
9181   // SELECT(Mask3, In3,
9182   //        SELECT(Mask2, In2,
9183   //               SELECT(Mask1, In1,
9184   //                      In0)))
9185   // Note that Mask0 is never used: lanes for which no path reaches this phi,
9186   // and which are essentially undef, are taken from In0.
9187   InnerLoopVectorizer::VectorParts Entry(State.UF);
9188   for (unsigned In = 0; In < NumIncoming; ++In) {
9189     for (unsigned Part = 0; Part < State.UF; ++Part) {
9190       // We might have single edge PHIs (blocks) - use an identity
9191       // 'select' for the first PHI operand.
9192       Value *In0 = State.get(getIncomingValue(In), Part);
9193       if (In == 0)
9194         Entry[Part] = In0; // Initialize with the first incoming value.
9195       else {
9196         // Select between the current value and the previous incoming edge
9197         // based on the incoming mask.
9198         Value *Cond = State.get(getMask(In), Part);
9199         Entry[Part] =
9200             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9201       }
9202     }
9203   }
9204   for (unsigned Part = 0; Part < State.UF; ++Part)
9205     State.set(this, Entry[Part], Part);
9206 }
9207 
9208 void VPInterleaveRecipe::execute(VPTransformState &State) {
9209   assert(!State.Instance && "Interleave group being replicated.");
9210   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9211                                       getStoredValues(), getMask());
9212 }
9213 
9214 void VPReductionRecipe::execute(VPTransformState &State) {
9215   assert(!State.Instance && "Reduction being replicated.");
9216   Value *PrevInChain = State.get(getChainOp(), 0);
9217   for (unsigned Part = 0; Part < State.UF; ++Part) {
9218     RecurKind Kind = RdxDesc->getRecurrenceKind();
9219     bool IsOrdered = useOrderedReductions(*RdxDesc);
9220     Value *NewVecOp = State.get(getVecOp(), Part);
9221     if (VPValue *Cond = getCondOp()) {
9222       Value *NewCond = State.get(Cond, Part);
9223       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9224       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9225           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9226       Constant *IdenVec =
9227           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9228       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9229       NewVecOp = Select;
9230     }
9231     Value *NewRed;
9232     Value *NextInChain;
9233     if (IsOrdered) {
9234       NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9235                                       PrevInChain);
9236       PrevInChain = NewRed;
9237     } else {
9238       PrevInChain = State.get(getChainOp(), Part);
9239       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9240     }
9241     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9242       NextInChain =
9243           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9244                          NewRed, PrevInChain);
9245     } else if (IsOrdered)
9246       NextInChain = NewRed;
9247     else {
9248       NextInChain = State.Builder.CreateBinOp(
9249           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9250           PrevInChain);
9251     }
9252     State.set(this, NextInChain, Part);
9253   }
9254 }
9255 
9256 void VPReplicateRecipe::execute(VPTransformState &State) {
9257   if (State.Instance) { // Generate a single instance.
9258     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9259     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9260                                     *State.Instance, IsPredicated, State);
9261     // Insert scalar instance packing it into a vector.
9262     if (AlsoPack && State.VF.isVector()) {
9263       // If we're constructing lane 0, initialize to start from poison.
9264       if (State.Instance->Lane.isFirstLane()) {
9265         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9266         Value *Poison = PoisonValue::get(
9267             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9268         State.set(this, Poison, State.Instance->Part);
9269       }
9270       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9271     }
9272     return;
9273   }
9274 
9275   // Generate scalar instances for all VF lanes of all UF parts, unless the
9276   // instruction is uniform, in which case generate only the first lane for
9277   // each of the UF parts.
9278   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9279   assert((!State.VF.isScalable() || IsUniform) &&
9280          "Can't scalarize a scalable vector");
9281   for (unsigned Part = 0; Part < State.UF; ++Part)
9282     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9283       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9284                                       VPIteration(Part, Lane), IsPredicated,
9285                                       State);
9286 }
9287 
9288 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9289   assert(State.Instance && "Branch on Mask works only on single instance.");
9290 
9291   unsigned Part = State.Instance->Part;
9292   unsigned Lane = State.Instance->Lane.getKnownLane();
9293 
9294   Value *ConditionBit = nullptr;
9295   VPValue *BlockInMask = getMask();
9296   if (BlockInMask) {
9297     ConditionBit = State.get(BlockInMask, Part);
9298     if (ConditionBit->getType()->isVectorTy())
9299       ConditionBit = State.Builder.CreateExtractElement(
9300           ConditionBit, State.Builder.getInt32(Lane));
9301   } else // Block in mask is all-one.
9302     ConditionBit = State.Builder.getTrue();
9303 
9304   // Replace the temporary unreachable terminator with a new conditional branch,
9305   // whose two destinations will be set later when they are created.
9306   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9307   assert(isa<UnreachableInst>(CurrentTerminator) &&
9308          "Expected to replace unreachable terminator with conditional branch.");
9309   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9310   CondBr->setSuccessor(0, nullptr);
9311   ReplaceInstWithInst(CurrentTerminator, CondBr);
9312 }
9313 
9314 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9315   assert(State.Instance && "Predicated instruction PHI works per instance.");
9316   Instruction *ScalarPredInst =
9317       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9318   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9319   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9320   assert(PredicatingBB && "Predicated block has no single predecessor.");
9321   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9322          "operand must be VPReplicateRecipe");
9323 
9324   // By current pack/unpack logic we need to generate only a single phi node: if
9325   // a vector value for the predicated instruction exists at this point it means
9326   // the instruction has vector users only, and a phi for the vector value is
9327   // needed. In this case the recipe of the predicated instruction is marked to
9328   // also do that packing, thereby "hoisting" the insert-element sequence.
9329   // Otherwise, a phi node for the scalar value is needed.
9330   unsigned Part = State.Instance->Part;
9331   if (State.hasVectorValue(getOperand(0), Part)) {
9332     Value *VectorValue = State.get(getOperand(0), Part);
9333     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9334     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9335     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9336     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9337     if (State.hasVectorValue(this, Part))
9338       State.reset(this, VPhi, Part);
9339     else
9340       State.set(this, VPhi, Part);
9341     // NOTE: Currently we need to update the value of the operand, so the next
9342     // predicated iteration inserts its generated value in the correct vector.
9343     State.reset(getOperand(0), VPhi, Part);
9344   } else {
9345     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9346     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9347     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9348                      PredicatingBB);
9349     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9350     if (State.hasScalarValue(this, *State.Instance))
9351       State.reset(this, Phi, *State.Instance);
9352     else
9353       State.set(this, Phi, *State.Instance);
9354     // NOTE: Currently we need to update the value of the operand, so the next
9355     // predicated iteration inserts its generated value in the correct vector.
9356     State.reset(getOperand(0), Phi, *State.Instance);
9357   }
9358 }
9359 
9360 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9361   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9362   State.ILV->vectorizeMemoryInstruction(&Ingredient, State,
9363                                         StoredValue ? nullptr : getVPValue(),
9364                                         getAddr(), StoredValue, getMask());
9365 }
9366 
9367 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9368 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9369 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9370 // for predication.
9371 static ScalarEpilogueLowering getScalarEpilogueLowering(
9372     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9373     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9374     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9375     LoopVectorizationLegality &LVL) {
9376   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9377   // don't look at hints or options, and don't request a scalar epilogue.
9378   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9379   // LoopAccessInfo (due to code dependency and not being able to reliably get
9380   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9381   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9382   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9383   // back to the old way and vectorize with versioning when forced. See D81345.)
9384   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9385                                                       PGSOQueryType::IRPass) &&
9386                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9387     return CM_ScalarEpilogueNotAllowedOptSize;
9388 
9389   // 2) If set, obey the directives
9390   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9391     switch (PreferPredicateOverEpilogue) {
9392     case PreferPredicateTy::ScalarEpilogue:
9393       return CM_ScalarEpilogueAllowed;
9394     case PreferPredicateTy::PredicateElseScalarEpilogue:
9395       return CM_ScalarEpilogueNotNeededUsePredicate;
9396     case PreferPredicateTy::PredicateOrDontVectorize:
9397       return CM_ScalarEpilogueNotAllowedUsePredicate;
9398     }
9399   }
9400 
9401   // 3) If set, obey the hints
9402   switch (Hints.getPredicate()) {
9403   case LoopVectorizeHints::FK_Enabled:
9404     return CM_ScalarEpilogueNotNeededUsePredicate;
9405   case LoopVectorizeHints::FK_Disabled:
9406     return CM_ScalarEpilogueAllowed;
9407   }
9408 
9409   // 4) if the TTI hook indicates this is profitable, request predication.
9410   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9411                                        LVL.getLAI()))
9412     return CM_ScalarEpilogueNotNeededUsePredicate;
9413 
9414   return CM_ScalarEpilogueAllowed;
9415 }
9416 
9417 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
9418   // If Values have been set for this Def, return the one relevant for \p Part.
9419   if (hasVectorValue(Def, Part))
9420     return Data.PerPartOutput[Def][Part];
9421 
9422   if (!hasScalarValue(Def, {Part, 0})) {
9423     Value *IRV = Def->getLiveInIRValue();
9424     Value *B = ILV->getBroadcastInstrs(IRV);
9425     set(Def, B, Part);
9426     return B;
9427   }
9428 
9429   Value *ScalarValue = get(Def, {Part, 0});
9430   // If we aren't vectorizing, we can just copy the scalar map values over
9431   // to the vector map.
9432   if (VF.isScalar()) {
9433     set(Def, ScalarValue, Part);
9434     return ScalarValue;
9435   }
9436 
9437   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9438   bool IsUniform = RepR && RepR->isUniform();
9439 
9440   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9441   // Check if there is a scalar value for the selected lane.
9442   if (!hasScalarValue(Def, {Part, LastLane})) {
9443     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9444     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9445            "unexpected recipe found to be invariant");
9446     IsUniform = true;
9447     LastLane = 0;
9448   }
9449 
9450   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9451 
9452   // Set the insert point after the last scalarized instruction. This
9453   // ensures the insertelement sequence will directly follow the scalar
9454   // definitions.
9455   auto OldIP = Builder.saveIP();
9456   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9457   Builder.SetInsertPoint(&*NewIP);
9458 
9459   // However, if we are vectorizing, we need to construct the vector values.
9460   // If the value is known to be uniform after vectorization, we can just
9461   // broadcast the scalar value corresponding to lane zero for each unroll
9462   // iteration. Otherwise, we construct the vector values using
9463   // insertelement instructions. Since the resulting vectors are stored in
9464   // State, we will only generate the insertelements once.
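  // For a non-uniform Def with, say, VF = 4, the packing below roughly emits:
  //   %v0 = insertelement <4 x Ty> poison, Ty %s0, i32 0
  //   %v1 = insertelement <4 x Ty> %v0,    Ty %s1, i32 1
  //   ... and so on for the remaining lanes.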
9465   Value *VectorValue = nullptr;
9466   if (IsUniform) {
9467     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9468     set(Def, VectorValue, Part);
9469   } else {
9470     // Initialize packing with insertelements to start from poison.
9471     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
9472     Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
9473     set(Def, Poison, Part);
9474     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9475       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9476     VectorValue = get(Def, Part);
9477   }
9478   Builder.restoreIP(OldIP);
9479   return VectorValue;
9480 }
9481 
9482 // Process the loop in the VPlan-native vectorization path. This path builds
9483 // VPlan upfront in the vectorization pipeline, which allows to apply
9484 // VPlan-to-VPlan transformations from the very beginning without modifying the
9485 // input LLVM IR.
9486 static bool processLoopInVPlanNativePath(
9487     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9488     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9489     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9490     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9491     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9492     LoopVectorizationRequirements &Requirements) {
9493 
9494   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9495     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9496     return false;
9497   }
9498   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9499   Function *F = L->getHeader()->getParent();
9500   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9501 
9502   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9503       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9504 
9505   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9506                                 &Hints, IAI);
9507   // Use the planner for outer loop vectorization.
9508   // TODO: CM is not used at this point inside the planner. Turn CM into an
9509   // optional argument if we don't need it in the future.
9510   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9511                                Requirements, ORE);
9512 
9513   // Get user vectorization factor.
9514   ElementCount UserVF = Hints.getWidth();
9515 
9516   // Plan how to best vectorize, return the best VF and its cost.
9517   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9518 
9519   // If we are stress testing VPlan builds, do not attempt to generate vector
9520   // code. Masked vector code generation support will follow soon.
9521   // Also, do not attempt to vectorize if no vector code will be produced.
9522   if (VPlanBuildStressTest || EnableVPlanPredication ||
9523       VectorizationFactor::Disabled() == VF)
9524     return false;
9525 
9526   LVP.setBestPlan(VF.Width, 1);
9527 
9528   {
9529     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9530                              F->getParent()->getDataLayout());
9531     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9532                            &CM, BFI, PSI, Checks);
9533     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9534                       << L->getHeader()->getParent()->getName() << "\"\n");
9535     LVP.executePlan(LB, DT);
9536   }
9537 
9538   // Mark the loop as already vectorized to avoid vectorizing again.
9539   Hints.setAlreadyVectorized();
9540   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9541   return true;
9542 }
9543 
9544 // Emit a remark if there are stores to floats that required a floating point
9545 // extension. If the vectorized loop was generated with such conversions, there
9546 // will be a performance penalty from the conversion overhead and the change in
9547 // the vector width.
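// For example (illustrative IR), a float store fed through double arithmetic:
//   %e = fpext float %x to double
//   %d = fmul double %e, %y
//   %t = fptrunc double %d to float
//   store float %t, float* %p
// walks up from the store, finds the fpext and emits the remark.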
9548 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9549   SmallVector<Instruction *, 4> Worklist;
9550   for (BasicBlock *BB : L->getBlocks()) {
9551     for (Instruction &Inst : *BB) {
9552       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9553         if (S->getValueOperand()->getType()->isFloatTy())
9554           Worklist.push_back(S);
9555       }
9556     }
9557   }
9558 
9559   // Traverse the floating point stores upwards, searching for floating point
9560   // conversions.
9561   SmallPtrSet<const Instruction *, 4> Visited;
9562   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9563   while (!Worklist.empty()) {
9564     auto *I = Worklist.pop_back_val();
9565     if (!L->contains(I))
9566       continue;
9567     if (!Visited.insert(I).second)
9568       continue;
9569 
9570     // Emit a remark if the floating point store required a floating
9571     // point conversion.
9572     // TODO: More work could be done to identify the root cause such as a
9573     // constant or a function return type and point the user to it.
9574     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9575       ORE->emit([&]() {
9576         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9577                                           I->getDebugLoc(), L->getHeader())
9578                << "floating point conversion changes vector width. "
9579                << "Mixed floating point precision requires an up/down "
9580                << "cast that will negatively impact performance.";
9581       });
9582 
9583     for (Use &Op : I->operands())
9584       if (auto *OpI = dyn_cast<Instruction>(Op))
9585         Worklist.push_back(OpI);
9586   }
9587 }
9588 
9589 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9590     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9591                                !EnableLoopInterleaving),
9592       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9593                               !EnableLoopVectorization) {}
9594 
9595 bool LoopVectorizePass::processLoop(Loop *L) {
9596   assert((EnableVPlanNativePath || L->isInnermost()) &&
9597          "VPlan-native path is not enabled. Only process inner loops.");
9598 
9599 #ifndef NDEBUG
9600   const std::string DebugLocStr = getDebugLocString(L);
9601 #endif /* NDEBUG */
9602 
9603   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9604                     << L->getHeader()->getParent()->getName() << "\" from "
9605                     << DebugLocStr << "\n");
9606 
9607   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9608 
9609   LLVM_DEBUG(
9610       dbgs() << "LV: Loop hints:"
9611              << " force="
9612              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9613                      ? "disabled"
9614                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9615                             ? "enabled"
9616                             : "?"))
9617              << " width=" << Hints.getWidth()
9618              << " unroll=" << Hints.getInterleave() << "\n");
9619 
9620   // Function containing loop
9621   Function *F = L->getHeader()->getParent();
9622 
  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed provide the
  // less verbose reporting of vectorized loops and of unvectorized loops that
  // may benefit from vectorization, respectively.
9630 
9631   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9632     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9633     return false;
9634   }
9635 
9636   PredicatedScalarEvolution PSE(*SE, *L);
9637 
9638   // Check if it is legal to vectorize the loop.
9639   LoopVectorizationRequirements Requirements;
9640   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9641                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9642   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9643     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9644     Hints.emitRemarkWithHints();
9645     return false;
9646   }
9647 
9648   // Check the function attributes and profiles to find out if this function
9649   // should be optimized for size.
9650   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9651       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9652 
9653   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9654   // here. They may require CFG and instruction level transformations before
9655   // even evaluating whether vectorization is profitable. Since we cannot modify
9656   // the incoming IR, we need to build VPlan upfront in the vectorization
9657   // pipeline.
9658   if (!L->isInnermost())
9659     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9660                                         ORE, BFI, PSI, Hints, Requirements);
9661 
9662   assert(L->isInnermost() && "Inner loop expected.");
9663 
9664   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9665   // count by optimizing for size, to minimize overheads.
9666   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9667   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9668     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9669                       << "This loop is worth vectorizing only if no scalar "
9670                       << "iteration overheads are incurred.");
9671     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9672       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9673     else {
9674       LLVM_DEBUG(dbgs() << "\n");
9675       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9676     }
9677   }
9678 
9679   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it could possibly be correct -- what
  // if the loop is an integer loop and the vector instructions selected are
  // purely integer vector instructions?
9683   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9684     reportVectorizationFailure(
9685         "Can't vectorize when the NoImplicitFloat attribute is used",
9686         "loop not vectorized due to NoImplicitFloat attribute",
9687         "NoImplicitFloat", ORE, L);
9688     Hints.emitRemarkWithHints();
9689     return false;
9690   }
9691 
9692   // Check if the target supports potentially unsafe FP vectorization.
9693   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9694   // for the target we're vectorizing for, to make sure none of the
9695   // additional fp-math flags can help.
9696   if (Hints.isPotentiallyUnsafe() &&
9697       TTI->isFPVectorizationPotentiallyUnsafe()) {
9698     reportVectorizationFailure(
9699         "Potentially unsafe FP op prevents vectorization",
9700         "loop not vectorized due to unsafe FP support.",
9701         "UnsafeFP", ORE, L);
9702     Hints.emitRemarkWithHints();
9703     return false;
9704   }
9705 
9706   if (!Requirements.canVectorizeFPMath(Hints)) {
9707     ORE->emit([&]() {
9708       auto *ExactFPMathInst = Requirements.getExactFPInst();
9709       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9710                                                  ExactFPMathInst->getDebugLoc(),
9711                                                  ExactFPMathInst->getParent())
9712              << "loop not vectorized: cannot prove it is safe to reorder "
9713                 "floating-point operations";
9714     });
9715     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9716                          "reorder floating-point operations\n");
9717     Hints.emitRemarkWithHints();
9718     return false;
9719   }
9720 
9721   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9722   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9723 
9724   // If an override option has been passed in for interleaved accesses, use it.
9725   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9726     UseInterleaved = EnableInterleavedMemAccesses;
9727 
9728   // Analyze interleaved memory accesses.
9729   if (UseInterleaved) {
9730     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9731   }
9732 
9733   // Use the cost model.
9734   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9735                                 F, &Hints, IAI);
9736   CM.collectValuesToIgnore();
9737 
9738   // Use the planner for vectorization.
9739   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
9740                                Requirements, ORE);
9741 
9742   // Get user vectorization factor and interleave count.
9743   ElementCount UserVF = Hints.getWidth();
9744   unsigned UserIC = Hints.getInterleave();
9745 
9746   // Plan how to best vectorize, return the best VF and its cost.
9747   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9748 
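  // Default to no vectorization (scalar VF) and no interleaving; both are
  // overridden below if planning succeeded.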
9749   VectorizationFactor VF = VectorizationFactor::Disabled();
9750   unsigned IC = 1;
9751 
9752   if (MaybeVF) {
9753     VF = *MaybeVF;
9754     // Select the interleave count.
9755     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
9756   }
9757 
9758   // Identify the diagnostic messages that should be produced.
9759   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9760   bool VectorizeLoop = true, InterleaveLoop = true;
9761   if (VF.Width.isScalar()) {
9762     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9763     VecDiagMsg = std::make_pair(
9764         "VectorizationNotBeneficial",
9765         "the cost-model indicates that vectorization is not beneficial");
9766     VectorizeLoop = false;
9767   }
9768 
9769   if (!MaybeVF && UserIC > 1) {
9770     // Tell the user interleaving was avoided up-front, despite being explicitly
9771     // requested.
9772     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9773                          "interleaving should be avoided up front\n");
9774     IntDiagMsg = std::make_pair(
9775         "InterleavingAvoided",
9776         "Ignoring UserIC, because interleaving was avoided up front");
9777     InterleaveLoop = false;
9778   } else if (IC == 1 && UserIC <= 1) {
9779     // Tell the user interleaving is not beneficial.
9780     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9781     IntDiagMsg = std::make_pair(
9782         "InterleavingNotBeneficial",
9783         "the cost-model indicates that interleaving is not beneficial");
9784     InterleaveLoop = false;
9785     if (UserIC == 1) {
9786       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9787       IntDiagMsg.second +=
9788           " and is explicitly disabled or interleave count is set to 1";
9789     }
9790   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
9792     LLVM_DEBUG(
9793         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9794     IntDiagMsg = std::make_pair(
9795         "InterleavingBeneficialButDisabled",
9796         "the cost-model indicates that interleaving is beneficial "
9797         "but is explicitly disabled or interleave count is set to 1");
9798     InterleaveLoop = false;
9799   }
9800 
9801   // Override IC if user provided an interleave count.
9802   IC = UserIC > 0 ? UserIC : IC;
9803 
9804   // Emit diagnostic messages, if any.
9805   const char *VAPassName = Hints.vectorizeAnalysisPassName();
9806   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
9808     ORE->emit([&]() {
9809       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9810                                       L->getStartLoc(), L->getHeader())
9811              << VecDiagMsg.second;
9812     });
9813     ORE->emit([&]() {
9814       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9815                                       L->getStartLoc(), L->getHeader())
9816              << IntDiagMsg.second;
9817     });
9818     return false;
9819   } else if (!VectorizeLoop && InterleaveLoop) {
9820     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9821     ORE->emit([&]() {
9822       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9823                                         L->getStartLoc(), L->getHeader())
9824              << VecDiagMsg.second;
9825     });
9826   } else if (VectorizeLoop && !InterleaveLoop) {
9827     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9828                       << ") in " << DebugLocStr << '\n');
9829     ORE->emit([&]() {
9830       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9831                                         L->getStartLoc(), L->getHeader())
9832              << IntDiagMsg.second;
9833     });
9834   } else if (VectorizeLoop && InterleaveLoop) {
9835     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9836                       << ") in " << DebugLocStr << '\n');
9837     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9838   }
9839 
9840   bool DisableRuntimeUnroll = false;
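  // Remember the original loop metadata so follow-up metadata for the
  // remainder loop can be derived from it below.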
9841   MDNode *OrigLoopID = L->getLoopID();
9842   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks so the cleanup happens
    // immediately after vector code generation is done.
9846     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9847                              F->getParent()->getDataLayout());
9848     if (!VF.Width.isScalar() || IC > 1)
9849       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
9850     LVP.setBestPlan(VF.Width, IC);
9851 
9852     using namespace ore;
9853     if (!VectorizeLoop) {
9854       assert(IC > 1 && "interleave count should not be 1 or 0");
9855       // If we decided that it is not legal to vectorize the loop, then
9856       // interleave it.
9857       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
9858                                  &CM, BFI, PSI, Checks);
9859       LVP.executePlan(Unroller, DT);
9860 
9861       ORE->emit([&]() {
9862         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9863                                   L->getHeader())
9864                << "interleaved loop (interleaved count: "
9865                << NV("InterleaveCount", IC) << ")";
9866       });
9867     } else {
9868       // If we decided that it is *legal* to vectorize the loop, then do it.
9869 
9870       // Consider vectorizing the epilogue too if it's profitable.
9871       VectorizationFactor EpilogueVF =
9872           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
9873       if (EpilogueVF.Width.isVector()) {
9874 
9875         // The first pass vectorizes the main loop and creates a scalar epilogue
9876         // to be vectorized by executing the plan (potentially with a different
9877         // factor) again shortly afterwards.
9878         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
9879                                           EpilogueVF.Width.getKnownMinValue(),
9880                                           1);
9881         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
9882                                            EPI, &LVL, &CM, BFI, PSI, Checks);
9883 
9884         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
9885         LVP.executePlan(MainILV, DT);
9886         ++LoopsVectorized;
9887 
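        // The original scalar loop now serves as the epilogue; restore
        // loop-simplify form and LCSSA before vectorizing it in the second
        // pass.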
9888         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9889         formLCSSARecursively(*L, *DT, LI, SE);
9890 
9891         // Second pass vectorizes the epilogue and adjusts the control flow
9892         // edges from the first pass.
9893         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
9894         EPI.MainLoopVF = EPI.EpilogueVF;
9895         EPI.MainLoopUF = EPI.EpilogueUF;
9896         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
9897                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
9898                                                  Checks);
9899         LVP.executePlan(EpilogILV, DT);
9900         ++LoopsEpilogueVectorized;
9901 
9902         if (!MainILV.areSafetyChecksAdded())
9903           DisableRuntimeUnroll = true;
9904       } else {
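        // No epilogue vectorization: vectorize the loop in a single pass with
        // the selected VF and interleave count.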
9905         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
9906                                &LVL, &CM, BFI, PSI, Checks);
9907         LVP.executePlan(LB, DT);
9908         ++LoopsVectorized;
9909 
9910         // Add metadata to disable runtime unrolling a scalar loop when there
9911         // are no runtime checks about strides and memory. A scalar loop that is
9912         // rarely used is not worth unrolling.
9913         if (!LB.areSafetyChecksAdded())
9914           DisableRuntimeUnroll = true;
9915       }
9916       // Report the vectorization decision.
9917       ORE->emit([&]() {
9918         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
9919                                   L->getHeader())
9920                << "vectorized loop (vectorization width: "
9921                << NV("VectorizationFactor", VF.Width)
9922                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
9923       });
9924     }
9925 
9926     if (ORE->allowExtraAnalysis(LV_NAME))
9927       checkMixedPrecision(L, ORE);
9928   }
9929 
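  // Derive follow-up metadata for the remaining (remainder) loop from the
  // original loop metadata, if requested.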
9930   Optional<MDNode *> RemainderLoopID =
9931       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
9932                                       LLVMLoopVectorizeFollowupEpilogue});
9933   if (RemainderLoopID.hasValue()) {
9934     L->setLoopID(RemainderLoopID.getValue());
9935   } else {
9936     if (DisableRuntimeUnroll)
9937       AddRuntimeUnrollDisableMetaData(L);
9938 
9939     // Mark the loop as already vectorized to avoid vectorizing again.
9940     Hints.setAlreadyVectorized();
9941   }
9942 
9943   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9944   return true;
9945 }
9946 
9947 LoopVectorizeResult LoopVectorizePass::runImpl(
9948     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
9949     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
9950     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
9951     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
9952     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
9953   SE = &SE_;
9954   LI = &LI_;
9955   TTI = &TTI_;
9956   DT = &DT_;
9957   BFI = &BFI_;
9958   TLI = TLI_;
9959   AA = &AA_;
9960   AC = &AC_;
9961   GetLAA = &GetLAA_;
9962   DB = &DB_;
9963   ORE = &ORE_;
9964   PSI = PSI_;
9965 
9966   // Don't attempt if
9967   // 1. the target claims to have no vector registers, and
9968   // 2. interleaving won't help ILP.
9969   //
9970   // The second condition is necessary because, even if the target has no
9971   // vector registers, loop vectorization may still enable scalar
9972   // interleaving.
9973   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9974       TTI->getMaxInterleaveFactor(1) < 2)
9975     return LoopVectorizeResult(false, false);
9976 
9977   bool Changed = false, CFGChanged = false;
9978 
9979   // The vectorizer requires loops to be in simplified form.
9980   // Since simplification may add new inner loops, it has to run before the
9981   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
9983   // vectorized.
9984   for (auto &L : *LI)
9985     Changed |= CFGChanged |=
9986         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9987 
9988   // Build up a worklist of inner-loops to vectorize. This is necessary as
9989   // the act of vectorizing or partially unrolling a loop creates new loops
9990   // and can invalidate iterators across the loops.
9991   SmallVector<Loop *, 8> Worklist;
9992 
9993   for (Loop *L : *LI)
9994     collectSupportedLoops(*L, LI, ORE, Worklist);
9995 
9996   LoopsAnalyzed += Worklist.size();
9997 
9998   // Now walk the identified inner loops.
9999   while (!Worklist.empty()) {
10000     Loop *L = Worklist.pop_back_val();
10001 
10002     // For the inner loops we actually process, form LCSSA to simplify the
10003     // transform.
10004     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10005 
10006     Changed |= CFGChanged |= processLoop(L);
10007   }
10008 
  // Report whether anything was changed and whether the CFG was modified.
  return LoopVectorizeResult(Changed, CFGChanged);
10011 }
10012 
10013 PreservedAnalyses LoopVectorizePass::run(Function &F,
10014                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
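  // Callback used to lazily compute the LoopAccessInfo for a given loop via
  // the inner loop analysis manager.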
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
10058 }
10059